/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
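/*
 * Typical usage of the hardware semaphore helpers, mirroring the callers
 * further down in this file (e.g. ql_set_mac_addr(), qlge_vlan_rx_add_vid()):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... access the shared resource ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */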
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit) {
			return 0;
		}
		udelay(UDELAY_DELAY);
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
186 /* The CFG register is used to download TX and RX control blocks
187 * to the chip. This function waits for an operation to complete.
189 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 for (count = 0; count < UDELAY_COUNT; count++) {
195 temp = ql_read32(qdev, CFG);
200 udelay(UDELAY_DELAY);
205 /* Used to issue init control blocks to hw. Maps control block,
206 * sets address, triggers download, waits for completion.
208 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
217 if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
218 direction = DMA_TO_DEVICE;
220 direction = DMA_FROM_DEVICE;
222 map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
223 if (dma_mapping_error(&qdev->pdev->dev, map)) {
224 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232 status = ql_wait_cfg(qdev, bit);
234 netif_err(qdev, ifup, qdev->ndev,
235 "Timed out waiting for CFG to come ready.\n");
239 ql_write32(qdev, ICB_L, (u32)map);
240 ql_write32(qdev, ICB_H, (u32)(map >> 32));
242 mask = CFG_Q_MASK | (bit << 16);
243 value = bit | (q_id << CFG_Q_SHIFT);
244 ql_write32(qdev, CFG, (mask | value));
247 * Wait for the bit to clear after signaling hw.
249 status = ql_wait_cfg(qdev, bit);
251 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
253 dma_unmap_single(&qdev->pdev->dev, map, size, direction);
257 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
258 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
265 case MAC_ADDR_TYPE_MULTI_MAC:
266 case MAC_ADDR_TYPE_CAM_MAC: {
267 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
270 ql_write32(qdev, MAC_ADDR_IDX,
271 (offset++) | /* offset */
272 (index << MAC_ADDR_IDX_SHIFT) | /* index */
273 MAC_ADDR_ADR | MAC_ADDR_RS |
275 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
278 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
279 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
282 ql_write32(qdev, MAC_ADDR_IDX,
283 (offset++) | /* offset */
284 (index << MAC_ADDR_IDX_SHIFT) | /* index */
285 MAC_ADDR_ADR | MAC_ADDR_RS |
287 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
290 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
291 if (type == MAC_ADDR_TYPE_CAM_MAC) {
292 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
296 ql_write32(qdev, MAC_ADDR_IDX,
297 (offset++) | /* offset */
299 << MAC_ADDR_IDX_SHIFT) | /* index */
301 MAC_ADDR_RS | type); /* type */
302 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
310 case MAC_ADDR_TYPE_VLAN:
311 case MAC_ADDR_TYPE_MULTI_FLTR:
313 netif_crit(qdev, ifup, qdev->ndev,
314 "Address type %d not yet supported.\n", type);
320 /* Set up a MAC, multicast or VLAN address for the
321 * inbound frame matching.
323 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
330 case MAC_ADDR_TYPE_MULTI_MAC: {
331 u32 upper = (addr[0] << 8) | addr[1];
332 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
335 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
338 ql_write32(qdev, MAC_ADDR_IDX,
339 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
341 ql_write32(qdev, MAC_ADDR_DATA, lower);
342 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
345 ql_write32(qdev, MAC_ADDR_IDX,
346 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
349 ql_write32(qdev, MAC_ADDR_DATA, upper);
350 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
353 case MAC_ADDR_TYPE_CAM_MAC: {
355 u32 upper = (addr[0] << 8) | addr[1];
356 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
358 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
361 ql_write32(qdev, MAC_ADDR_IDX,
362 (offset++) | /* offset */
363 (index << MAC_ADDR_IDX_SHIFT) | /* index */
365 ql_write32(qdev, MAC_ADDR_DATA, lower);
366 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
369 ql_write32(qdev, MAC_ADDR_IDX,
370 (offset++) | /* offset */
371 (index << MAC_ADDR_IDX_SHIFT) | /* index */
373 ql_write32(qdev, MAC_ADDR_DATA, upper);
374 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
377 ql_write32(qdev, MAC_ADDR_IDX,
378 (offset) | /* offset */
379 (index << MAC_ADDR_IDX_SHIFT) | /* index */
381 /* This field should also include the queue id
382 * and possibly the function id. Right now we hardcode
383 * the route field to NIC core.
385 cam_output = (CAM_OUT_ROUTE_NIC |
386 (qdev->func << CAM_OUT_FUNC_SHIFT) |
387 (0 << CAM_OUT_CQ_ID_SHIFT));
388 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
389 cam_output |= CAM_OUT_RV;
390 /* route to NIC core */
391 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
394 case MAC_ADDR_TYPE_VLAN: {
395 u32 enable_bit = *((u32 *)&addr[0]);
396 /* For VLAN, the addr actually holds a bit that
397 * either enables or disables the vlan id we are
398 * addressing. It's either MAC_ADDR_E on or off.
399 * That's bit-27 we're talking about.
401 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
404 ql_write32(qdev, MAC_ADDR_IDX,
405 offset | /* offset */
406 (index << MAC_ADDR_IDX_SHIFT) | /* index */
408 enable_bit); /* enable/disable */
411 case MAC_ADDR_TYPE_MULTI_FLTR:
413 netif_crit(qdev, ifup, qdev->ndev,
414 "Address type %d not yet supported.\n", type);
420 /* Set or clear MAC address in hardware. We sometimes
421 * have to clear it to prevent wrong frame routing
422 * especially in a bonding environment.
424 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
427 char zero_mac_addr[ETH_ALEN];
431 addr = &qdev->current_mac_addr[0];
432 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
433 "Set Mac addr %pM\n", addr);
435 eth_zero_addr(zero_mac_addr);
436 addr = &zero_mac_addr[0];
437 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
438 "Clearing MAC address\n");
440 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
443 status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
444 MAC_ADDR_TYPE_CAM_MAC,
445 qdev->func * MAX_CQ);
446 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
448 netif_err(qdev, ifup, qdev->ndev,
449 "Failed to init mac address.\n");
453 void ql_link_on(struct ql_adapter *qdev)
455 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
456 netif_carrier_on(qdev->ndev);
457 ql_set_mac_addr(qdev, 1);
460 void ql_link_off(struct ql_adapter *qdev)
462 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
463 netif_carrier_off(qdev->ndev);
464 ql_set_mac_addr(qdev, 0);
467 /* Get a specific frame routing value from the CAM.
468 * Used for debug and reg dump.
470 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
474 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
478 ql_write32(qdev, RT_IDX,
479 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
480 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
483 *value = ql_read32(qdev, RT_DATA);
488 /* The NIC function for this chip has 16 routing indexes. Each one can be used
489 * to route different frame types to various inbound queues. We send broadcast/
490 * multicast/error frames to the default queue for slow handling,
491 * and CAM hit/RSS frames to the fast handling queues.
493 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
496 int status = -EINVAL; /* Return error if no mask match. */
502 value = RT_IDX_DST_CAM_Q | /* dest */
503 RT_IDX_TYPE_NICQ | /* type */
504 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
507 case RT_IDX_VALID: /* Promiscuous Mode frames. */
509 value = RT_IDX_DST_DFLT_Q | /* dest */
510 RT_IDX_TYPE_NICQ | /* type */
511 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
514 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
516 value = RT_IDX_DST_DFLT_Q | /* dest */
517 RT_IDX_TYPE_NICQ | /* type */
518 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
521 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
523 value = RT_IDX_DST_DFLT_Q | /* dest */
524 RT_IDX_TYPE_NICQ | /* type */
525 (RT_IDX_IP_CSUM_ERR_SLOT <<
526 RT_IDX_IDX_SHIFT); /* index */
529 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
531 value = RT_IDX_DST_DFLT_Q | /* dest */
532 RT_IDX_TYPE_NICQ | /* type */
533 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
534 RT_IDX_IDX_SHIFT); /* index */
537 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
539 value = RT_IDX_DST_DFLT_Q | /* dest */
540 RT_IDX_TYPE_NICQ | /* type */
541 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
544 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
546 value = RT_IDX_DST_DFLT_Q | /* dest */
547 RT_IDX_TYPE_NICQ | /* type */
548 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
551 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
553 value = RT_IDX_DST_DFLT_Q | /* dest */
554 RT_IDX_TYPE_NICQ | /* type */
555 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
558 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
560 value = RT_IDX_DST_RSS | /* dest */
561 RT_IDX_TYPE_NICQ | /* type */
562 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
565 case 0: /* Clear the E-bit on an entry. */
567 value = RT_IDX_DST_DFLT_Q | /* dest */
568 RT_IDX_TYPE_NICQ | /* type */
569 (index << RT_IDX_IDX_SHIFT);/* index */
573 netif_err(qdev, ifup, qdev->ndev,
574 "Mask type %d not yet supported.\n", mask);
580 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
583 value |= (enable ? RT_IDX_E : 0);
584 ql_write32(qdev, RT_IDX, value);
585 ql_write32(qdev, RT_DATA, enable ? mask : 0);
591 static void ql_enable_interrupts(struct ql_adapter *qdev)
593 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
596 static void ql_disable_interrupts(struct ql_adapter *qdev)
598 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
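/* Per-vector completion interrupt control: write the vector's precomputed
 * enable or disable mask from its intr_context to the INTR_EN register.
 */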
601 static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
603 struct intr_context *ctx = &qdev->intr_context[intr];
605 ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
608 static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
610 struct intr_context *ctx = &qdev->intr_context[intr];
612 ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
615 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
619 for (i = 0; i < qdev->intr_count; i++)
620 ql_enable_completion_interrupt(qdev, i);
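/* Validate the flash image already read into qdev->flash: compare the
 * four-character signature against 'str' and sum the image as 16-bit
 * little-endian words to verify the checksum.
 */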
623 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
627 __le16 *flash = (__le16 *)&qdev->flash;
629 status = strncmp((char *)&qdev->flash, str, 4);
631 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
635 for (i = 0; i < size; i++)
636 csum += le16_to_cpu(*flash++);
639 netif_err(qdev, ifup, qdev->ndev,
640 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
645 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
648 /* wait for reg to come ready */
649 status = ql_wait_reg_rdy(qdev,
650 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
653 /* set up for reg read */
654 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
655 /* wait for reg to come ready */
656 status = ql_wait_reg_rdy(qdev,
657 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
660 /* This data is stored on flash as an array of
661 * __le32. Since ql_read32() returns cpu endian
662 * we need to swap it back.
664 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
669 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
673 __le32 *p = (__le32 *)&qdev->flash;
677 /* Get flash offset for function and adjust
681 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
683 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
685 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
688 size = sizeof(struct flash_params_8000) / sizeof(u32);
689 for (i = 0; i < size; i++, p++) {
690 status = ql_read_flash_word(qdev, i + offset, p);
692 netif_err(qdev, ifup, qdev->ndev,
693 "Error reading flash.\n");
698 status = ql_validate_flash(qdev,
699 sizeof(struct flash_params_8000) /
703 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
708 /* Extract either manufacturer or BOFM modified
711 if (qdev->flash.flash_params_8000.data_type1 == 2)
713 qdev->flash.flash_params_8000.mac_addr1,
714 qdev->ndev->addr_len);
717 qdev->flash.flash_params_8000.mac_addr,
718 qdev->ndev->addr_len);
720 if (!is_valid_ether_addr(mac_addr)) {
721 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
726 memcpy(qdev->ndev->dev_addr,
728 qdev->ndev->addr_len);
731 ql_sem_unlock(qdev, SEM_FLASH_MASK);
735 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
739 __le32 *p = (__le32 *)&qdev->flash;
741 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
743 /* Second function's parameters follow the first
749 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
752 for (i = 0; i < size; i++, p++) {
753 status = ql_read_flash_word(qdev, i + offset, p);
755 netif_err(qdev, ifup, qdev->ndev,
756 "Error reading flash.\n");
762 status = ql_validate_flash(qdev,
763 sizeof(struct flash_params_8012) /
767 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
772 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
777 memcpy(qdev->ndev->dev_addr,
778 qdev->flash.flash_params_8012.mac_addr,
779 qdev->ndev->addr_len);
782 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
790 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
793 /* wait for reg to come ready */
794 status = ql_wait_reg_rdy(qdev,
795 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
798 /* write the data to the data reg */
799 ql_write32(qdev, XGMAC_DATA, data);
800 /* trigger the write */
801 ql_write32(qdev, XGMAC_ADDR, reg);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
809 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
812 /* wait for reg to come ready */
813 status = ql_wait_reg_rdy(qdev,
814 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
817 /* set up for reg read */
818 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
819 /* wait for reg to come ready */
820 status = ql_wait_reg_rdy(qdev,
821 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
825 *data = ql_read32(qdev, XGMAC_DATA);
830 /* This is used for reading the 64-bit statistics regs. */
831 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
837 status = ql_read_xgmac_reg(qdev, reg, &lo);
841 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
845 *data = (u64)lo | ((u64)hi << 32);
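/* Port bring-up for the 8000 series: query the MPI firmware version and
 * state for the driver banner, then schedule the port-config worker to
 * negotiate the TX/RX frame sizes.
 */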
851 static int ql_8000_port_initialize(struct ql_adapter *qdev)
855 * Get MPI firmware version for driver banner
858 status = ql_mb_about_fw(qdev);
861 status = ql_mb_get_fw_state(qdev);
864 /* Wake up a worker to get/set the TX/RX frame sizes. */
865 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
876 static int ql_8012_port_initialize(struct ql_adapter *qdev)
881 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
882 /* Another function has the semaphore, so
883 * wait for the port init bit to come ready.
885 netif_info(qdev, link, qdev->ndev,
886 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
889 netif_crit(qdev, link, qdev->ndev,
890 "Port initialize timed out.\n");
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
896 /* Set the core reset. */
897 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
900 data |= GLOBAL_CFG_RESET;
901 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
905 /* Clear the core reset and turn on jumbo for receiver. */
906 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
907 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
908 data |= GLOBAL_CFG_TX_STAT_EN;
909 data |= GLOBAL_CFG_RX_STAT_EN;
910 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	/* Enable transmitter, and clear its reset. */
915 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
918 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
919 data |= TX_CFG_EN; /* Enable the transmitter. */
920 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	/* Enable receiver and clear its reset. */
925 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
928 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
929 data |= RX_CFG_EN; /* Enable the receiver. */
930 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
936 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
940 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
944 /* Signal to the world that the port is enabled. */
945 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
947 ql_sem_unlock(qdev, qdev->xg_sem_mask);
951 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
953 return PAGE_SIZE << qdev->lbq_buf_order;
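/* Return the next buffer descriptor to be cleaned and advance
 * next_to_clean, wrapping at the end of the queue.
 */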
956 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
958 struct qlge_bq_desc *bq_desc;
960 bq_desc = &bq->queue[bq->next_to_clean];
961 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
966 static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
967 struct rx_ring *rx_ring)
969 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
971 dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
972 qdev->lbq_buf_size, DMA_FROM_DEVICE);
974 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
975 ql_lbq_block_size(qdev)) {
976 /* last chunk of the master page */
977 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
978 ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
984 /* Update an rx ring index. */
985 static void ql_update_cq(struct rx_ring *rx_ring)
987 rx_ring->cnsmr_idx++;
988 rx_ring->curr_entry++;
989 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
990 rx_ring->cnsmr_idx = 0;
991 rx_ring->curr_entry = rx_ring->cq_base;
995 static void ql_write_cq_idx(struct rx_ring *rx_ring)
997 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
static const char * const bq_type_name[] = {
	[QLGE_SB] = "sbq",
	[QLGE_LB] = "lbq",
};
1005 /* return 0 or negative error */
1006 static int qlge_refill_sb(struct rx_ring *rx_ring,
1007 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1009 struct ql_adapter *qdev = rx_ring->qdev;
1010 struct sk_buff *skb;
1012 if (sbq_desc->p.skb)
1015 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1016 "ring %u sbq: getting new skb for index %d.\n",
1017 rx_ring->cq_id, sbq_desc->index);
1019 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1022 skb_reserve(skb, QLGE_SB_PAD);
1024 sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1027 if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1028 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1029 dev_kfree_skb_any(skb);
1032 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1034 sbq_desc->p.skb = skb;
1038 /* return 0 or negative error */
1039 static int qlge_refill_lb(struct rx_ring *rx_ring,
1040 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1042 struct ql_adapter *qdev = rx_ring->qdev;
1043 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1045 if (!master_chunk->page) {
1047 dma_addr_t dma_addr;
1049 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1050 if (unlikely(!page))
1052 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1053 ql_lbq_block_size(qdev),
1055 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1056 __free_pages(page, qdev->lbq_buf_order);
1057 netif_err(qdev, drv, qdev->ndev,
1058 "PCI mapping failed.\n");
1061 master_chunk->page = page;
1062 master_chunk->va = page_address(page);
1063 master_chunk->offset = 0;
1064 rx_ring->chunk_dma_addr = dma_addr;
1067 lbq_desc->p.pg_chunk = *master_chunk;
1068 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1069 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1070 lbq_desc->p.pg_chunk.offset);
1072 /* Adjust the master page chunk for next
1075 master_chunk->offset += qdev->lbq_buf_size;
1076 if (master_chunk->offset == ql_lbq_block_size(qdev)) {
1077 master_chunk->page = NULL;
1079 master_chunk->va += qdev->lbq_buf_size;
1080 get_page(master_chunk->page);
1086 /* return 0 or negative error */
1087 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1089 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1090 struct ql_adapter *qdev = rx_ring->qdev;
1091 struct qlge_bq_desc *bq_desc;
1096 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1101 i = bq->next_to_use;
1102 bq_desc = &bq->queue[i];
1105 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1106 "ring %u %s: try cleaning idx %d\n",
1107 rx_ring->cq_id, bq_type_name[bq->type], i);
1109 if (bq->type == QLGE_SB)
1110 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1112 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1114 netif_err(qdev, ifup, qdev->ndev,
1115 "ring %u %s: Could not get a page chunk, idx %d\n",
1116 rx_ring->cq_id, bq_type_name[bq->type], i);
1123 bq_desc = &bq->queue[0];
1127 } while (refill_count);
1130 if (bq->next_to_use != i) {
1131 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1132 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1133 "ring %u %s: updating prod idx = %d.\n",
1134 rx_ring->cq_id, bq_type_name[bq->type],
1136 ql_write_db_reg(i, bq->prod_idx_db_reg);
1138 bq->next_to_use = i;
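/* Refill both the small and large buffer queues of an rx ring.  If an
 * allocation fails while the hardware is close to running out of buffers,
 * the refill is rescheduled on the slow (workqueue) path after 'delay'.
 */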
1144 static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1145 unsigned long delay)
1147 bool sbq_fail, lbq_fail;
1149 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1150 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1152 /* Minimum number of buffers needed to be able to receive at least one
1153 * frame of any format:
1154 * sbq: 1 for header + 1 for data
1155 * lbq: mtu 9000 / lb size
1156 * Below this, the queue might stall.
1158 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1159 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1160 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1161 /* Allocations can take a long time in certain cases (ex.
1162 * reclaim). Therefore, use a workqueue for long-running
1165 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1166 &rx_ring->refill_work, delay);
1169 static void qlge_slow_refill(struct work_struct *work)
1171 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1173 struct napi_struct *napi = &rx_ring->napi;
1176 ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1180 /* napi_disable() might have prevented incomplete napi work from being
1183 napi_schedule(napi);
1184 /* trigger softirq processing */
1188 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1189 * fails at some stage, or from the interrupt when a tx completes.
1191 static void ql_unmap_send(struct ql_adapter *qdev,
1192 struct tx_ring_desc *tx_ring_desc, int mapped)
1196 for (i = 0; i < mapped; i++) {
1197 if (i == 0 || (i == 7 && mapped > 7)) {
			/* Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1208 netif_printk(qdev, tx_done, KERN_DEBUG,
1210 "unmapping OAL area.\n");
1212 dma_unmap_single(&qdev->pdev->dev,
1213 dma_unmap_addr(&tx_ring_desc->map[i],
1215 dma_unmap_len(&tx_ring_desc->map[i],
1219 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1220 "unmapping frag %d.\n", i);
1221 dma_unmap_page(&qdev->pdev->dev,
1222 dma_unmap_addr(&tx_ring_desc->map[i],
1224 dma_unmap_len(&tx_ring_desc->map[i],
1225 maplen), DMA_TO_DEVICE);
1231 /* Map the buffers for this transmit. This will return
1232 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1234 static int ql_map_send(struct ql_adapter *qdev,
1235 struct ob_mac_iocb_req *mac_iocb_ptr,
1236 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1238 int len = skb_headlen(skb);
1240 int frag_idx, err, map_idx = 0;
1241 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1242 int frag_cnt = skb_shinfo(skb)->nr_frags;
1245 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1246 "frag_cnt = %d.\n", frag_cnt);
1249 * Map the skb buffer first.
1251 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1253 err = dma_mapping_error(&qdev->pdev->dev, map);
1255 netif_err(qdev, tx_queued, qdev->ndev,
1256 "PCI mapping failed with error: %d\n", err);
1258 return NETDEV_TX_BUSY;
1261 tbd->len = cpu_to_le32(len);
1262 tbd->addr = cpu_to_le64(map);
1263 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1264 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1268 * This loop fills the remainder of the 8 address descriptors
1269 * in the IOCB. If there are more than 7 fragments, then the
1270 * eighth address desc will point to an external list (OAL).
1271 * When this happens, the remainder of the frags will be stored
1274 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1275 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1278 if (frag_idx == 6 && frag_cnt > 7) {
1279 /* Let's tack on an sglist.
1280 * Our control block will now
1282 * iocb->seg[0] = skb->data
1283 * iocb->seg[1] = frag[0]
1284 * iocb->seg[2] = frag[1]
1285 * iocb->seg[3] = frag[2]
1286 * iocb->seg[4] = frag[3]
1287 * iocb->seg[5] = frag[4]
1288 * iocb->seg[6] = frag[5]
1289 * iocb->seg[7] = ptr to OAL (external sglist)
1290 * oal->seg[0] = frag[6]
1291 * oal->seg[1] = frag[7]
1292 * oal->seg[2] = frag[8]
1293 * oal->seg[3] = frag[9]
1294 * oal->seg[4] = frag[10]
1297 /* Tack on the OAL in the eighth segment of IOCB. */
1298 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1301 err = dma_mapping_error(&qdev->pdev->dev, map);
1303 netif_err(qdev, tx_queued, qdev->ndev,
1304 "PCI mapping outbound address list with error: %d\n",
1309 tbd->addr = cpu_to_le64(map);
1311 * The length is the number of fragments
1312 * that remain to be mapped times the length
1313 * of our sglist (OAL).
1316 cpu_to_le32((sizeof(struct tx_buf_desc) *
1317 (frag_cnt - frag_idx)) | TX_DESC_C);
1318 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1320 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1321 sizeof(struct oal));
1322 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1326 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1329 err = dma_mapping_error(&qdev->pdev->dev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping frags failed with error: %d.\n",
1337 tbd->addr = cpu_to_le64(map);
1338 tbd->len = cpu_to_le32(skb_frag_size(frag));
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1341 skb_frag_size(frag));
1344 /* Save the number of segments we've mapped. */
1345 tx_ring_desc->map_cnt = map_idx;
1346 /* Terminate the last segment. */
1347 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1348 return NETDEV_TX_OK;
map_error:
	/* If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
1357 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1358 return NETDEV_TX_BUSY;
1361 /* Categorizing receive firmware frame errors */
1362 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1363 struct rx_ring *rx_ring)
1365 struct nic_stats *stats = &qdev->nic_stats;
1367 stats->rx_err_count++;
1368 rx_ring->rx_errors++;
1370 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1371 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1372 stats->rx_code_err++;
1374 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1375 stats->rx_oversize_err++;
1377 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1378 stats->rx_undersize_err++;
1380 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1381 stats->rx_preamble_err++;
1383 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1384 stats->rx_frame_len_err++;
1386 case IB_MAC_IOCB_RSP_ERR_CRC:
1387 stats->rx_crc_err++;
1394 * ql_update_mac_hdr_len - helper routine to update the mac header length
1395 * based on vlan tags if present
1397 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1398 struct ib_mac_iocb_rsp *ib_mac_rsp,
1399 void *page, size_t *len)
1403 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1405 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1407 /* Look for stacked vlan tags in ethertype field */
1408 if (tags[6] == ETH_P_8021Q &&
1409 tags[8] == ETH_P_8021Q)
1410 *len += 2 * VLAN_HLEN;
1416 /* Process an inbound completion from an rx ring. */
1417 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1418 struct rx_ring *rx_ring,
1419 struct ib_mac_iocb_rsp *ib_mac_rsp,
1420 u32 length, u16 vlan_id)
1422 struct sk_buff *skb;
1423 struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1424 struct napi_struct *napi = &rx_ring->napi;
1426 /* Frame error, so drop the packet. */
1427 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1428 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1429 put_page(lbq_desc->p.pg_chunk.page);
1432 napi->dev = qdev->ndev;
1434 skb = napi_get_frags(napi);
1436 netif_err(qdev, drv, qdev->ndev,
1437 "Couldn't get an skb, exiting.\n");
1438 rx_ring->rx_dropped++;
1439 put_page(lbq_desc->p.pg_chunk.page);
1442 prefetch(lbq_desc->p.pg_chunk.va);
1443 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1444 lbq_desc->p.pg_chunk.page,
1445 lbq_desc->p.pg_chunk.offset,
1449 skb->data_len += length;
1450 skb->truesize += length;
1451 skb_shinfo(skb)->nr_frags++;
1453 rx_ring->rx_packets++;
1454 rx_ring->rx_bytes += length;
1455 skb->ip_summed = CHECKSUM_UNNECESSARY;
1456 skb_record_rx_queue(skb, rx_ring->cq_id);
1457 if (vlan_id != 0xffff)
1458 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1459 napi_gro_frags(napi);
1462 /* Process an inbound completion from an rx ring. */
1463 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1464 struct rx_ring *rx_ring,
1465 struct ib_mac_iocb_rsp *ib_mac_rsp,
1466 u32 length, u16 vlan_id)
1468 struct net_device *ndev = qdev->ndev;
1469 struct sk_buff *skb = NULL;
1471 struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1472 struct napi_struct *napi = &rx_ring->napi;
1473 size_t hlen = ETH_HLEN;
1475 skb = netdev_alloc_skb(ndev, length);
1477 rx_ring->rx_dropped++;
1478 put_page(lbq_desc->p.pg_chunk.page);
1482 addr = lbq_desc->p.pg_chunk.va;
1485 /* Frame error, so drop the packet. */
1486 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1487 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1491 /* Update the MAC header length*/
1492 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1494 /* The max framesize filter on this chip is set higher than
1495 * MTU since FCoE uses 2k frames.
1497 if (skb->len > ndev->mtu + hlen) {
1498 netif_err(qdev, drv, qdev->ndev,
1499 "Segment too small, dropping.\n");
1500 rx_ring->rx_dropped++;
1503 skb_put_data(skb, addr, hlen);
1504 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1505 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1507 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1508 lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1509 skb->len += length - hlen;
1510 skb->data_len += length - hlen;
1511 skb->truesize += length - hlen;
1513 rx_ring->rx_packets++;
1514 rx_ring->rx_bytes += skb->len;
1515 skb->protocol = eth_type_trans(skb, ndev);
1516 skb_checksum_none_assert(skb);
1518 if ((ndev->features & NETIF_F_RXCSUM) &&
1519 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1521 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1522 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1523 "TCP checksum done!\n");
1524 skb->ip_summed = CHECKSUM_UNNECESSARY;
1525 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1526 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1527 /* Unfragmented ipv4 UDP frame. */
1529 (struct iphdr *)((u8 *)addr + hlen);
1530 if (!(iph->frag_off &
1531 htons(IP_MF | IP_OFFSET))) {
1532 skb->ip_summed = CHECKSUM_UNNECESSARY;
1533 netif_printk(qdev, rx_status, KERN_DEBUG,
1535 "UDP checksum done!\n");
1540 skb_record_rx_queue(skb, rx_ring->cq_id);
1541 if (vlan_id != 0xffff)
1542 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1543 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1544 napi_gro_receive(napi, skb);
1546 netif_receive_skb(skb);
1549 dev_kfree_skb_any(skb);
1550 put_page(lbq_desc->p.pg_chunk.page);
1553 /* Process an inbound completion from an rx ring. */
1554 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1555 struct rx_ring *rx_ring,
1556 struct ib_mac_iocb_rsp *ib_mac_rsp,
1557 u32 length, u16 vlan_id)
1559 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1560 struct net_device *ndev = qdev->ndev;
1561 struct sk_buff *skb, *new_skb;
1563 skb = sbq_desc->p.skb;
1564 /* Allocate new_skb and copy */
1565 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1567 rx_ring->rx_dropped++;
1570 skb_reserve(new_skb, NET_IP_ALIGN);
1572 dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1573 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1575 skb_put_data(new_skb, skb->data, length);
1579 /* Frame error, so drop the packet. */
1580 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1581 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1582 dev_kfree_skb_any(skb);
1586 /* loopback self test for ethtool */
1587 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1588 ql_check_lb_frame(qdev, skb);
1589 dev_kfree_skb_any(skb);
1593 /* The max framesize filter on this chip is set higher than
1594 * MTU since FCoE uses 2k frames.
1596 if (skb->len > ndev->mtu + ETH_HLEN) {
1597 dev_kfree_skb_any(skb);
1598 rx_ring->rx_dropped++;
1602 prefetch(skb->data);
1603 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1604 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1606 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1607 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1608 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1609 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1610 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1611 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1613 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1614 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1615 "Promiscuous Packet.\n");
1617 rx_ring->rx_packets++;
1618 rx_ring->rx_bytes += skb->len;
1619 skb->protocol = eth_type_trans(skb, ndev);
1620 skb_checksum_none_assert(skb);
1622 /* If rx checksum is on, and there are no
1623 * csum or frame errors.
1625 if ((ndev->features & NETIF_F_RXCSUM) &&
1626 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1628 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1629 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1630 "TCP checksum done!\n");
1631 skb->ip_summed = CHECKSUM_UNNECESSARY;
1632 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1633 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1634 /* Unfragmented ipv4 UDP frame. */
1635 struct iphdr *iph = (struct iphdr *)skb->data;
1637 if (!(iph->frag_off &
1638 htons(IP_MF | IP_OFFSET))) {
1639 skb->ip_summed = CHECKSUM_UNNECESSARY;
1640 netif_printk(qdev, rx_status, KERN_DEBUG,
1642 "UDP checksum done!\n");
1647 skb_record_rx_queue(skb, rx_ring->cq_id);
1648 if (vlan_id != 0xffff)
1649 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1650 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1651 napi_gro_receive(&rx_ring->napi, skb);
1653 netif_receive_skb(skb);
1656 static void ql_realign_skb(struct sk_buff *skb, int len)
1658 void *temp_addr = skb->data;
1660 /* Undo the skb_reserve(skb,32) we did before
1661 * giving to hardware, and realign data on
1662 * a 2-byte boundary.
1664 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1665 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1666 memmove(skb->data, temp_addr, len);
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1674 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1675 struct rx_ring *rx_ring,
1676 struct ib_mac_iocb_rsp *ib_mac_rsp)
1678 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1679 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1680 struct qlge_bq_desc *lbq_desc, *sbq_desc;
1681 struct sk_buff *skb = NULL;
1682 size_t hlen = ETH_HLEN;
1685 * Handle the header buffer if present.
1687 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1688 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 "Header of %d bytes in small buffer.\n", hdr_len);
1692 * Headers fit nicely into a small buffer.
1694 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1695 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1696 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1697 skb = sbq_desc->p.skb;
1698 ql_realign_skb(skb, hdr_len);
1699 skb_put(skb, hdr_len);
1700 sbq_desc->p.skb = NULL;
1704 * Handle the data buffer(s).
1706 if (unlikely(!length)) { /* Is there data too? */
1707 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1708 "No Data buffer in this packet.\n");
1712 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1713 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1714 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715 "Headers in small, data of %d bytes in small, combine them.\n",
1718 * Data is less than small buffer size so it's
1719 * stuffed in a small buffer.
1720 * For this case we append the data
1721 * from the "data" small buffer to the "header" small
1724 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1725 dma_sync_single_for_cpu(&qdev->pdev->dev,
1729 skb_put_data(skb, sbq_desc->p.skb->data, length);
1731 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1732 "%d bytes in a single small buffer.\n",
1734 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1735 skb = sbq_desc->p.skb;
1736 ql_realign_skb(skb, length);
1737 skb_put(skb, length);
1738 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1741 sbq_desc->p.skb = NULL;
1743 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1744 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1745 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1746 "Header in small, %d bytes in large. Chain large to small!\n",
1749 * The data is in a single large buffer. We
1750 * chain it to the header buffer's skb and let
1753 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1754 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1755 "Chaining page at offset = %d, for %d bytes to skb.\n",
1756 lbq_desc->p.pg_chunk.offset, length);
1757 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1758 lbq_desc->p.pg_chunk.offset, length);
1760 skb->data_len += length;
1761 skb->truesize += length;
1764 * The headers and data are in a single large buffer. We
1765 * copy it to a new skb and let it go. This can happen with
1766 * jumbo mtu on a non-TCP/UDP frame.
1768 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1769 skb = netdev_alloc_skb(qdev->ndev, length);
1771 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1772 "No skb available, drop the packet.\n");
1775 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1778 skb_reserve(skb, NET_IP_ALIGN);
1779 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1782 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1783 lbq_desc->p.pg_chunk.offset,
1786 skb->data_len += length;
1787 skb->truesize += length;
1788 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1789 lbq_desc->p.pg_chunk.va,
1791 __pskb_pull_tail(skb, hlen);
		/* The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
1807 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1808 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1809 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1810 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/* This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
1821 "%d bytes of headers & data in chain of large.\n",
1823 skb = sbq_desc->p.skb;
1824 sbq_desc->p.skb = NULL;
1825 skb_reserve(skb, NET_IP_ALIGN);
1828 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1829 size = min(length, qdev->lbq_buf_size);
1831 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1832 "Adding page %d to skb for %d bytes.\n",
1834 skb_fill_page_desc(skb, i,
1835 lbq_desc->p.pg_chunk.page,
1836 lbq_desc->p.pg_chunk.offset, size);
1838 skb->data_len += size;
1839 skb->truesize += size;
1842 } while (length > 0);
1843 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1845 __pskb_pull_tail(skb, hlen);
1850 /* Process an inbound completion from an rx ring. */
1851 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1852 struct rx_ring *rx_ring,
1853 struct ib_mac_iocb_rsp *ib_mac_rsp,
1856 struct net_device *ndev = qdev->ndev;
1857 struct sk_buff *skb = NULL;
1859 QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
1861 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1862 if (unlikely(!skb)) {
1863 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1864 "No skb available, drop packet.\n");
1865 rx_ring->rx_dropped++;
1869 /* Frame error, so drop the packet. */
1870 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1871 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1872 dev_kfree_skb_any(skb);
1876 /* The max framesize filter on this chip is set higher than
1877 * MTU since FCoE uses 2k frames.
1879 if (skb->len > ndev->mtu + ETH_HLEN) {
1880 dev_kfree_skb_any(skb);
1881 rx_ring->rx_dropped++;
1885 /* loopback self test for ethtool */
1886 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1887 ql_check_lb_frame(qdev, skb);
1888 dev_kfree_skb_any(skb);
1892 prefetch(skb->data);
1893 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1894 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1895 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1896 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1897 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1898 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1899 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1900 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1901 rx_ring->rx_multicast++;
1903 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1904 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905 "Promiscuous Packet.\n");
1908 skb->protocol = eth_type_trans(skb, ndev);
1909 skb_checksum_none_assert(skb);
1911 /* If rx checksum is on, and there are no
1912 * csum or frame errors.
1914 if ((ndev->features & NETIF_F_RXCSUM) &&
1915 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1917 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 "TCP checksum done!\n");
1920 skb->ip_summed = CHECKSUM_UNNECESSARY;
1921 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1922 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1923 /* Unfragmented ipv4 UDP frame. */
1924 struct iphdr *iph = (struct iphdr *)skb->data;
1926 if (!(iph->frag_off &
1927 htons(IP_MF | IP_OFFSET))) {
1928 skb->ip_summed = CHECKSUM_UNNECESSARY;
1929 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1930 "TCP checksum done!\n");
1935 rx_ring->rx_packets++;
1936 rx_ring->rx_bytes += skb->len;
1937 skb_record_rx_queue(skb, rx_ring->cq_id);
1938 if (vlan_id != 0xffff)
1939 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1940 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1941 napi_gro_receive(&rx_ring->napi, skb);
1943 netif_receive_skb(skb);
1946 /* Process an inbound completion from an rx ring. */
1947 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1948 struct rx_ring *rx_ring,
1949 struct ib_mac_iocb_rsp *ib_mac_rsp)
1951 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1952 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1953 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1954 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1955 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1957 QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
1959 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1960 /* The data and headers are split into
1963 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1965 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1966 /* The data fit in a single small buffer.
1967 * Allocate a new skb, copy the data and
1968 * return the buffer to the free pool.
1970 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1972 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1973 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1974 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1975 /* TCP packet in a page chunk that's been checksummed.
1976 * Tack it on to our GRO skb and let it go.
1978 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1980 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1981 /* Non-TCP packet in a page chunk. Allocate an
1982 * skb, tack it on frags, and send it up.
1984 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
1990 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1994 return (unsigned long)length;
1997 /* Process an outbound completion from an rx ring. */
1998 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1999 struct ob_mac_iocb_rsp *mac_rsp)
2001 struct tx_ring *tx_ring;
2002 struct tx_ring_desc *tx_ring_desc;
2004 QL_DUMP_OB_MAC_RSP(qdev, mac_rsp);
2005 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2006 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2007 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2008 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2009 tx_ring->tx_packets++;
2010 dev_kfree_skb(tx_ring_desc->skb);
2011 tx_ring_desc->skb = NULL;
2013 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2016 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2017 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2018 netif_warn(qdev, tx_done, qdev->ndev,
2019 "Total descriptor length did not match transfer length.\n");
2021 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2022 netif_warn(qdev, tx_done, qdev->ndev,
2023 "Frame too short to be valid, not sent.\n");
2025 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2026 netif_warn(qdev, tx_done, qdev->ndev,
2027 "Frame too long, but sent anyway.\n");
2029 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2030 netif_warn(qdev, tx_done, qdev->ndev,
2031 "PCI backplane error. Frame not sent.\n");
2034 atomic_inc(&tx_ring->tx_count);
2037 /* Fire up a handler to reset the MPI processor. */
2038 void ql_queue_fw_error(struct ql_adapter *qdev)
2041 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2044 void ql_queue_asic_error(struct ql_adapter *qdev)
2047 ql_disable_interrupts(qdev);
2048 /* Clear adapter up bit to signal the recovery
2049 * process that it shouldn't kill the reset worker
2052 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2053 /* Set asic recovery bit to indicate reset process that we are
2054 * in fatal error recovery process rather than normal close
2056 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2057 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2060 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2061 struct ib_ae_iocb_rsp *ib_ae_rsp)
2063 switch (ib_ae_rsp->event) {
2064 case MGMT_ERR_EVENT:
2065 netif_err(qdev, rx_err, qdev->ndev,
2066 "Management Processor Fatal Error.\n");
2067 ql_queue_fw_error(qdev);
2070 case CAM_LOOKUP_ERR_EVENT:
2071 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2072 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2073 ql_queue_asic_error(qdev);
2076 case SOFT_ECC_ERROR_EVENT:
2077 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2078 ql_queue_asic_error(qdev);
2081 case PCI_ERR_ANON_BUF_RD:
2082 netdev_err(qdev->ndev,
2083 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2085 ql_queue_asic_error(qdev);
2089 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2091 ql_queue_asic_error(qdev);
2096 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2098 struct ql_adapter *qdev = rx_ring->qdev;
2099 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2100 struct ob_mac_iocb_rsp *net_rsp = NULL;
2103 struct tx_ring *tx_ring;
2104 /* While there are entries in the completion queue. */
2105 while (prod != rx_ring->cnsmr_idx) {
2107 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2108 "cq_id = %d, prod = %d, cnsmr = %d\n",
2109 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2111 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2113 switch (net_rsp->opcode) {
2115 case OPCODE_OB_MAC_TSO_IOCB:
2116 case OPCODE_OB_MAC_IOCB:
2117 ql_process_mac_tx_intr(qdev, net_rsp);
2120 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2121 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2125 ql_update_cq(rx_ring);
2126 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2130 ql_write_cq_idx(rx_ring);
2131 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2132 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2133 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2135 * The queue got stopped because the tx_ring was full.
2136 * Wake it up, because it's now at least 25% empty.
2138 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2144 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2146 struct ql_adapter *qdev = rx_ring->qdev;
2147 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2148 struct ql_net_rsp_iocb *net_rsp;
2151 /* While there are entries in the completion queue. */
2152 while (prod != rx_ring->cnsmr_idx) {
2154 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2155 "cq_id = %d, prod = %d, cnsmr = %d\n",
2156 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2158 net_rsp = rx_ring->curr_entry;
2160 switch (net_rsp->opcode) {
2161 case OPCODE_IB_MAC_IOCB:
2162 ql_process_mac_rx_intr(qdev, rx_ring,
2163 (struct ib_mac_iocb_rsp *)
2167 case OPCODE_IB_AE_IOCB:
2168 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2172 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2173 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2178 ql_update_cq(rx_ring);
2179 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2180 if (count == budget)
2183 ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2184 ql_write_cq_idx(rx_ring);
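/* NAPI poll for an MSI-X vector: service any TX completion rings that
 * share this vector, then the RSS ring itself, and re-enable the
 * completion interrupt once less than a full budget of work was done.
 */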
2188 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2190 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2191 struct ql_adapter *qdev = rx_ring->qdev;
2192 struct rx_ring *trx_ring;
2193 int i, work_done = 0;
2194 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2196 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2197 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2199 /* Service the TX rings first. They start
2200 * right after the RSS rings.
2202 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2203 trx_ring = &qdev->rx_ring[i];
2204 /* If this TX completion ring belongs to this vector and
2205 * it's not empty then service it.
2207 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2208 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2209 trx_ring->cnsmr_idx)) {
2210 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2211 "%s: Servicing TX completion ring %d.\n",
2212 __func__, trx_ring->cq_id);
2213 ql_clean_outbound_rx_ring(trx_ring);
2218 * Now service the RSS ring if it's active.
2220 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2221 rx_ring->cnsmr_idx) {
2222 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2223 "%s: Servicing RX completion ring %d.\n",
2224 __func__, rx_ring->cq_id);
2225 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2228 if (work_done < budget) {
2229 napi_complete_done(napi, work_done);
2230 ql_enable_completion_interrupt(qdev, rx_ring->irq);
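/* Adjust the chip's VLAN receive filtering to match the CTAG_RX feature flag. */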
2235 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2237 struct ql_adapter *qdev = netdev_priv(ndev);
2239 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2240 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2241 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2243 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2248 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2249 * based on the features to enable/disable hardware vlan accel
2251 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2252 netdev_features_t features)
2254 struct ql_adapter *qdev = netdev_priv(ndev);
2256 bool need_restart = netif_running(ndev);
2259 status = ql_adapter_down(qdev);
2261 netif_err(qdev, link, qdev->ndev,
2262 "Failed to bring down the adapter\n");
2267 /* update the features with the recent change */
2268 ndev->features = features;
2271 status = ql_adapter_up(qdev);
2273 netif_err(qdev, link, qdev->ndev,
2274 "Failed to bring up the adapter\n");
2282 static int qlge_set_features(struct net_device *ndev,
2283 netdev_features_t features)
2285 netdev_features_t changed = ndev->features ^ features;
2288 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2289 /* Update the behavior of vlan accel in the adapter */
2290 err = qlge_update_hw_vlan_features(ndev, features);
2294 qlge_vlan_mode(ndev, features);
2300 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2302 u32 enable_bit = MAC_ADDR_E;
2305 err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2306 MAC_ADDR_TYPE_VLAN, vid);
2308 netif_err(qdev, ifup, qdev->ndev,
2309 "Failed to init vlan address.\n");
2313 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2315 struct ql_adapter *qdev = netdev_priv(ndev);
2319 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2323 err = __qlge_vlan_rx_add_vid(qdev, vid);
2324 set_bit(vid, qdev->active_vlans);
2326 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2331 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2336 err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2337 MAC_ADDR_TYPE_VLAN, vid);
2339 netif_err(qdev, ifup, qdev->ndev,
2340 "Failed to clear vlan address.\n");
2344 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2346 struct ql_adapter *qdev = netdev_priv(ndev);
2350 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2354 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2355 clear_bit(vid, qdev->active_vlans);
2357 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
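/* Re-program every VLAN ID that was active before the adapter was brought down. */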
2362 static void qlge_restore_vlan(struct ql_adapter *qdev)
2367 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2371 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2372 __qlge_vlan_rx_add_vid(qdev, vid);
2374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2377 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2378 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2380 struct rx_ring *rx_ring = dev_id;
2382 napi_schedule(&rx_ring->napi);
2386 /* This handles a fatal error, MPI activity, and the default
2387 * rx_ring in an MSI-X multiple vector environment.
2388 * In MSI/Legacy environment it also processes the rest of
2391 static irqreturn_t qlge_isr(int irq, void *dev_id)
2393 struct rx_ring *rx_ring = dev_id;
2394 struct ql_adapter *qdev = rx_ring->qdev;
2395 struct intr_context *intr_context = &qdev->intr_context[0];
2399 /* Experience shows that when using INTx interrupts, interrupts must
2400 * be masked manually.
2401 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2402 * (even though it is auto-masked), otherwise a later command to
2403 * enable it is not effective.
2405 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2406 ql_disable_completion_interrupt(qdev, 0);
2408 var = ql_read32(qdev, STS);
2411 * Check for fatal error.
2414 ql_disable_completion_interrupt(qdev, 0);
2415 ql_queue_asic_error(qdev);
2416 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2417 var = ql_read32(qdev, ERR_STS);
2418 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2423 * Check MPI processor activity.
2425 if ((var & STS_PI) &&
2426 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2428 * We've got an async event or mailbox completion.
2429 * Handle it and clear the source of the interrupt.
2431 netif_err(qdev, intr, qdev->ndev,
2432 "Got MPI processor interrupt.\n");
2433 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2434 queue_delayed_work_on(smp_processor_id(),
2435 qdev->workqueue, &qdev->mpi_work, 0);
2440 * Get the bit-mask that shows the active queues for this
2441 * pass. Compare it to the queues that this irq services
2442 * and call napi if there's a match.
2444 var = ql_read32(qdev, ISR1);
2445 if (var & intr_context->irq_mask) {
2446 netif_info(qdev, intr, qdev->ndev,
2447 "Waking handler for rx_ring[0].\n");
2448 napi_schedule(&rx_ring->napi);
2451 /* Experience shows that the device sometimes signals an
2452 * interrupt but no work is scheduled from this function.
2453 * Nevertheless, the interrupt is auto-masked. Therefore, we
2454 * systematically re-enable the interrupt if we didn't schedule napi.
2457 ql_enable_completion_interrupt(qdev, 0);
2460 return work_done ? IRQ_HANDLED : IRQ_NONE;
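/* Fill in the TSO fields of the outbound IOCB for GSO frames; non-GSO frames
 * fall through to ql_hw_csum_setup() in qlge_send().
 */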
2463 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2466 if (skb_is_gso(skb)) {
2468 __be16 l3_proto = vlan_get_protocol(skb);
2470 err = skb_cow_head(skb, 0);
2474 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2475 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2476 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2477 mac_iocb_ptr->total_hdrs_len =
2478 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2479 mac_iocb_ptr->net_trans_offset =
2480 cpu_to_le16(skb_network_offset(skb) |
2481 skb_transport_offset(skb)
2482 << OB_MAC_TRANSPORT_HDR_SHIFT);
2483 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2484 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2485 if (likely(l3_proto == htons(ETH_P_IP))) {
2486 struct iphdr *iph = ip_hdr(skb);
2489 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2490 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2494 } else if (l3_proto == htons(ETH_P_IPV6)) {
2495 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2496 tcp_hdr(skb)->check =
2497 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2498 &ipv6_hdr(skb)->daddr,
2506 static void ql_hw_csum_setup(struct sk_buff *skb,
2507 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2510 struct iphdr *iph = ip_hdr(skb);
2513 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2515 mac_iocb_ptr->net_trans_offset =
2516 cpu_to_le16(skb_network_offset(skb) |
2517 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2519 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2520 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2521 if (likely(iph->protocol == IPPROTO_TCP)) {
2522 check = &(tcp_hdr(skb)->check);
2523 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2524 mac_iocb_ptr->total_hdrs_len =
2525 cpu_to_le16(skb_transport_offset(skb) +
2526 (tcp_hdr(skb)->doff << 2));
2528 check = &(udp_hdr(skb)->check);
2529 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2530 mac_iocb_ptr->total_hdrs_len =
2531 cpu_to_le16(skb_transport_offset(skb) +
2532 sizeof(struct udphdr));
2534 *check = ~csum_tcpudp_magic(iph->saddr,
2535 iph->daddr, len, iph->protocol, 0);
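/* Transmit entry point: build a MAC IOCB (with TSO or checksum offload as
 * needed), map the skb and ring the TX doorbell.
 */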
2538 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2540 struct tx_ring_desc *tx_ring_desc;
2541 struct ob_mac_iocb_req *mac_iocb_ptr;
2542 struct ql_adapter *qdev = netdev_priv(ndev);
2544 struct tx_ring *tx_ring;
2545 u32 tx_ring_idx = (u32)skb->queue_mapping;
2547 tx_ring = &qdev->tx_ring[tx_ring_idx];
2549 if (skb_padto(skb, ETH_ZLEN))
2550 return NETDEV_TX_OK;
2552 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2553 netif_info(qdev, tx_queued, qdev->ndev,
2554 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2555 __func__, tx_ring_idx);
2556 netif_stop_subqueue(ndev, tx_ring->wq_id);
2557 tx_ring->tx_errors++;
2558 return NETDEV_TX_BUSY;
2560 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2561 mac_iocb_ptr = tx_ring_desc->queue_entry;
2562 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2564 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2565 mac_iocb_ptr->tid = tx_ring_desc->index;
2566 /* We use the upper 32-bits to store the tx queue for this IO.
2567 * When we get the completion we can use it to establish the context.
2569 mac_iocb_ptr->txq_idx = tx_ring_idx;
2570 tx_ring_desc->skb = skb;
2572 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2574 if (skb_vlan_tag_present(skb)) {
2575 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2576 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2577 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2578 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2580 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2582 dev_kfree_skb_any(skb);
2583 return NETDEV_TX_OK;
2584 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2585 ql_hw_csum_setup(skb,
2586 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2588 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2590 netif_err(qdev, tx_queued, qdev->ndev,
2591 "Could not map the segments.\n");
2592 tx_ring->tx_errors++;
2593 return NETDEV_TX_BUSY;
2595 QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr);
2596 tx_ring->prod_idx++;
2597 if (tx_ring->prod_idx == tx_ring->wq_len)
2598 tx_ring->prod_idx = 0;
2601 ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2602 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2603 "tx queued, slot %d, len %d\n",
2604 tx_ring->prod_idx, skb->len);
2606 atomic_dec(&tx_ring->tx_count);
2608 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2609 netif_stop_subqueue(ndev, tx_ring->wq_id);
2610 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2612 * The queue got stopped because the tx_ring was full.
2613 * Wake it up, because it's now at least 25% empty.
2615 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2617 return NETDEV_TX_OK;
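/* The shadow register areas are coherent DMA pages shared with the chip for
 * per-ring index updates and buffer queue address lists.
 */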
2620 static void ql_free_shadow_space(struct ql_adapter *qdev)
2622 if (qdev->rx_ring_shadow_reg_area) {
2623 dma_free_coherent(&qdev->pdev->dev,
2625 qdev->rx_ring_shadow_reg_area,
2626 qdev->rx_ring_shadow_reg_dma);
2627 qdev->rx_ring_shadow_reg_area = NULL;
2629 if (qdev->tx_ring_shadow_reg_area) {
2630 dma_free_coherent(&qdev->pdev->dev,
2632 qdev->tx_ring_shadow_reg_area,
2633 qdev->tx_ring_shadow_reg_dma);
2634 qdev->tx_ring_shadow_reg_area = NULL;
2638 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2640 qdev->rx_ring_shadow_reg_area =
2641 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2642 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2643 if (!qdev->rx_ring_shadow_reg_area) {
2644 netif_err(qdev, ifup, qdev->ndev,
2645 "Allocation of RX shadow space failed.\n");
2649 qdev->tx_ring_shadow_reg_area =
2650 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2651 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2652 if (!qdev->tx_ring_shadow_reg_area) {
2653 netif_err(qdev, ifup, qdev->ndev,
2654 "Allocation of TX shadow space failed.\n");
2655 goto err_wqp_sh_area;
2660 dma_free_coherent(&qdev->pdev->dev,
2662 qdev->rx_ring_shadow_reg_area,
2663 qdev->rx_ring_shadow_reg_dma);
2667 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2669 struct tx_ring_desc *tx_ring_desc;
2671 struct ob_mac_iocb_req *mac_iocb_ptr;
2673 mac_iocb_ptr = tx_ring->wq_base;
2674 tx_ring_desc = tx_ring->q;
2675 for (i = 0; i < tx_ring->wq_len; i++) {
2676 tx_ring_desc->index = i;
2677 tx_ring_desc->skb = NULL;
2678 tx_ring_desc->queue_entry = mac_iocb_ptr;
2682 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2685 static void ql_free_tx_resources(struct ql_adapter *qdev,
2686 struct tx_ring *tx_ring)
2688 if (tx_ring->wq_base) {
2689 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2690 tx_ring->wq_base, tx_ring->wq_base_dma);
2691 tx_ring->wq_base = NULL;
2697 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2698 struct tx_ring *tx_ring)
2701 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2702 &tx_ring->wq_base_dma, GFP_ATOMIC);
2704 if (!tx_ring->wq_base ||
2705 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2709 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2716 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2717 tx_ring->wq_base, tx_ring->wq_base_dma);
2718 tx_ring->wq_base = NULL;
2720 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
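/* Return any large-buffer pages still queued on this ring, unmapping the DMA
 * block with its last chunk, and drop the partially used master chunk.
 */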
2724 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2726 struct qlge_bq *lbq = &rx_ring->lbq;
2727 unsigned int last_offset;
2729 last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
2730 while (lbq->next_to_clean != lbq->next_to_use) {
2731 struct qlge_bq_desc *lbq_desc =
2732 &lbq->queue[lbq->next_to_clean];
2734 if (lbq_desc->p.pg_chunk.offset == last_offset)
2735 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2736 ql_lbq_block_size(qdev),
2738 put_page(lbq_desc->p.pg_chunk.page);
2740 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2743 if (rx_ring->master_chunk.page) {
2744 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2745 ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
2746 put_page(rx_ring->master_chunk.page);
2747 rx_ring->master_chunk.page = NULL;
2751 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2755 for (i = 0; i < QLGE_BQ_LEN; i++) {
2756 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2759 netif_err(qdev, ifup, qdev->ndev,
2760 "sbq_desc %d is NULL.\n", i);
2763 if (sbq_desc->p.skb) {
2764 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2767 dev_kfree_skb(sbq_desc->p.skb);
2768 sbq_desc->p.skb = NULL;
2773 /* Free all large and small rx buffers associated
2774 * with the completion queues for this device.
2776 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2780 for (i = 0; i < qdev->rx_ring_count; i++) {
2781 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2783 if (rx_ring->lbq.queue)
2784 ql_free_lbq_buffers(qdev, rx_ring);
2785 if (rx_ring->sbq.queue)
2786 ql_free_sbq_buffers(qdev, rx_ring);
2790 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2794 for (i = 0; i < qdev->rss_ring_count; i++)
2795 ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2799 static int qlge_init_bq(struct qlge_bq *bq)
2801 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2802 struct ql_adapter *qdev = rx_ring->qdev;
2803 struct qlge_bq_desc *bq_desc;
2807 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2808 &bq->base_dma, GFP_ATOMIC);
2810 netif_err(qdev, ifup, qdev->ndev,
2811 "ring %u %s allocation failed.\n", rx_ring->cq_id,
2812 bq_type_name[bq->type]);
2816 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2822 bq_desc = &bq->queue[0];
2823 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2824 bq_desc->p.skb = NULL;
2826 bq_desc->buf_ptr = buf_ptr;
2832 static void ql_free_rx_resources(struct ql_adapter *qdev,
2833 struct rx_ring *rx_ring)
2835 /* Free the small buffer queue. */
2836 if (rx_ring->sbq.base) {
2837 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2838 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2839 rx_ring->sbq.base = NULL;
2842 /* Free the small buffer queue control blocks. */
2843 kfree(rx_ring->sbq.queue);
2844 rx_ring->sbq.queue = NULL;
2846 /* Free the large buffer queue. */
2847 if (rx_ring->lbq.base) {
2848 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2849 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2850 rx_ring->lbq.base = NULL;
2853 /* Free the large buffer queue control blocks. */
2854 kfree(rx_ring->lbq.queue);
2855 rx_ring->lbq.queue = NULL;
2857 /* Free the rx queue. */
2858 if (rx_ring->cq_base) {
2859 dma_free_coherent(&qdev->pdev->dev,
2861 rx_ring->cq_base, rx_ring->cq_base_dma);
2862 rx_ring->cq_base = NULL;
2866 /* Allocate queues and buffers for this completions queue based
2867 * on the values in the parameter structure.
2869 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2870 struct rx_ring *rx_ring)
2874 * Allocate the completion queue for this rx_ring.
2877 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2878 &rx_ring->cq_base_dma, GFP_ATOMIC);
2880 if (!rx_ring->cq_base) {
2881 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2885 if (rx_ring->cq_id < qdev->rss_ring_count &&
2886 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2887 ql_free_rx_resources(qdev, rx_ring);
2894 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2896 struct tx_ring *tx_ring;
2897 struct tx_ring_desc *tx_ring_desc;
2901 * Loop through all queues and free
2904 for (j = 0; j < qdev->tx_ring_count; j++) {
2905 tx_ring = &qdev->tx_ring[j];
2906 for (i = 0; i < tx_ring->wq_len; i++) {
2907 tx_ring_desc = &tx_ring->q[i];
2908 if (tx_ring_desc && tx_ring_desc->skb) {
2909 netif_err(qdev, ifdown, qdev->ndev,
2910 "Freeing lost SKB %p, from queue %d, index %d.\n",
2911 tx_ring_desc->skb, j,
2912 tx_ring_desc->index);
2913 ql_unmap_send(qdev, tx_ring_desc,
2914 tx_ring_desc->map_cnt);
2915 dev_kfree_skb(tx_ring_desc->skb);
2916 tx_ring_desc->skb = NULL;
2922 static void ql_free_mem_resources(struct ql_adapter *qdev)
2926 for (i = 0; i < qdev->tx_ring_count; i++)
2927 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2928 for (i = 0; i < qdev->rx_ring_count; i++)
2929 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2930 ql_free_shadow_space(qdev);
2933 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2937 /* Allocate space for our shadow registers and such. */
2938 if (ql_alloc_shadow_space(qdev))
2941 for (i = 0; i < qdev->rx_ring_count; i++) {
2942 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2943 netif_err(qdev, ifup, qdev->ndev,
2944 "RX resource allocation failed.\n");
2948 /* Allocate tx queue resources */
2949 for (i = 0; i < qdev->tx_ring_count; i++) {
2950 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2951 netif_err(qdev, ifup, qdev->ndev,
2952 "TX resource allocation failed.\n");
2959 ql_free_mem_resources(qdev);
2963 /* Set up the rx ring control block and pass it to the chip.
2964 * The control block is defined as
2965 * "Completion Queue Initialization Control Block", or cqicb.
2967 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2969 struct cqicb *cqicb = &rx_ring->cqicb;
2970 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2971 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2972 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2973 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2974 void __iomem *doorbell_area =
2975 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2978 __le64 *base_indirect_ptr;
2981 /* Set up the shadow registers for this ring. */
2982 rx_ring->prod_idx_sh_reg = shadow_reg;
2983 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2984 *rx_ring->prod_idx_sh_reg = 0;
2985 shadow_reg += sizeof(u64);
2986 shadow_reg_dma += sizeof(u64);
2987 rx_ring->lbq.base_indirect = shadow_reg;
2988 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2989 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2990 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2991 rx_ring->sbq.base_indirect = shadow_reg;
2992 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2994 /* PCI doorbell mem area + 0x00 for consumer index register */
2995 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2996 rx_ring->cnsmr_idx = 0;
2997 rx_ring->curr_entry = rx_ring->cq_base;
2999 /* PCI doorbell mem area + 0x04 for valid register */
3000 rx_ring->valid_db_reg = doorbell_area + 0x04;
3002 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3003 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
3005 /* PCI doorbell mem area + 0x1c */
3006 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
3008 memset((void *)cqicb, 0, sizeof(struct cqicb));
3009 cqicb->msix_vect = rx_ring->irq;
3011 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3014 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3016 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3019 * Set up the control block load flags.
3021 cqicb->flags = FLAGS_LC | /* Load queue base address */
3022 FLAGS_LV | /* Load MSI-X vector */
3023 FLAGS_LI; /* Load irq delay values */
3024 if (rx_ring->cq_id < qdev->rss_ring_count) {
3025 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3026 tmp = (u64)rx_ring->lbq.base_dma;
3027 base_indirect_ptr = rx_ring->lbq.base_indirect;
3030 *base_indirect_ptr = cpu_to_le64(tmp);
3031 tmp += DB_PAGE_SIZE;
3032 base_indirect_ptr++;
3034 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3035 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3036 cqicb->lbq_buf_size =
3037 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3038 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3039 rx_ring->lbq.next_to_use = 0;
3040 rx_ring->lbq.next_to_clean = 0;
3042 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3043 tmp = (u64)rx_ring->sbq.base_dma;
3044 base_indirect_ptr = rx_ring->sbq.base_indirect;
3047 *base_indirect_ptr = cpu_to_le64(tmp);
3048 tmp += DB_PAGE_SIZE;
3049 base_indirect_ptr++;
3051 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3053 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3054 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3055 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3056 rx_ring->sbq.next_to_use = 0;
3057 rx_ring->sbq.next_to_clean = 0;
3059 if (rx_ring->cq_id < qdev->rss_ring_count) {
3060 /* Inbound completion handling rx_rings run in
3061 * separate NAPI contexts.
3063 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3065 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3066 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3068 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3069 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3071 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3072 CFG_LCQ, rx_ring->cq_id);
3074 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
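/* Set up the doorbell and shadow registers for a TX ring, then download its
 * work queue initialization control block (wqicb) to the chip.
 */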
3080 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3082 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3083 void __iomem *doorbell_area =
3084 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3085 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3086 (tx_ring->wq_id * sizeof(u64));
3087 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3088 (tx_ring->wq_id * sizeof(u64));
3092 * Assign doorbell registers for this tx_ring.
3094 /* TX PCI doorbell mem area for tx producer index */
3095 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3096 tx_ring->prod_idx = 0;
3097 /* TX PCI doorbell mem area + 0x04 */
3098 tx_ring->valid_db_reg = doorbell_area + 0x04;
3101 * Assign shadow registers for this tx_ring.
3103 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3104 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3106 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3107 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3108 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3109 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3111 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3113 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3115 ql_init_tx_ring(qdev, tx_ring);
3117 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3118 (u16)tx_ring->wq_id);
3120 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
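/* Undo whichever interrupt mode was enabled: release the MSI-X vectors or
 * disable MSI.
 */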
3126 static void ql_disable_msix(struct ql_adapter *qdev)
3128 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3129 pci_disable_msix(qdev->pdev);
3130 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3131 kfree(qdev->msi_x_entry);
3132 qdev->msi_x_entry = NULL;
3133 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3134 pci_disable_msi(qdev->pdev);
3135 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3139 /* We start by trying to get the number of vectors
3140 * stored in qdev->intr_count. If we don't get that
3141 * many then we reduce the count and try again.
3143 static void ql_enable_msix(struct ql_adapter *qdev)
3147 /* Get the MSIX vectors. */
3148 if (qlge_irq_type == MSIX_IRQ) {
3149 /* Try to alloc space for the msix struct,
3150 * if it fails then go to MSI/legacy.
3152 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3153 sizeof(struct msix_entry),
3155 if (!qdev->msi_x_entry) {
3156 qlge_irq_type = MSI_IRQ;
3160 for (i = 0; i < qdev->intr_count; i++)
3161 qdev->msi_x_entry[i].entry = i;
3163 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3164 1, qdev->intr_count);
3166 kfree(qdev->msi_x_entry);
3167 qdev->msi_x_entry = NULL;
3168 netif_warn(qdev, ifup, qdev->ndev,
3169 "MSI-X Enable failed, trying MSI.\n");
3170 qlge_irq_type = MSI_IRQ;
3172 qdev->intr_count = err;
3173 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3174 netif_info(qdev, ifup, qdev->ndev,
3175 "MSI-X Enabled, got %d vectors.\n",
3181 qdev->intr_count = 1;
3182 if (qlge_irq_type == MSI_IRQ) {
3183 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3184 set_bit(QL_MSI_ENABLED, &qdev->flags);
3185 netif_info(qdev, ifup, qdev->ndev,
3186 "Running with MSI interrupts.\n");
3190 qlge_irq_type = LEG_IRQ;
3191 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3192 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3193 "Running with legacy interrupts.\n");
3196 /* Each vector services 1 RSS ring and 1 or more
3197 * TX completion rings. This function loops through
3198 * the TX completion rings and assigns the vector that
3199 * will service it. An example would be if there are
3200 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3201 * This would mean that vector 0 would service RSS ring 0
3202 * and TX completion rings 0,1,2 and 3. Vector 1 would
3203 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3205 static void ql_set_tx_vect(struct ql_adapter *qdev)
3208 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3210 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3211 /* Assign irq vectors to TX rx_rings.*/
3212 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3213 i < qdev->rx_ring_count; i++) {
3214 if (j == tx_rings_per_vector) {
3218 qdev->rx_ring[i].irq = vect;
3222 /* For single vector all rings have an irq
3225 for (i = 0; i < qdev->rx_ring_count; i++)
3226 qdev->rx_ring[i].irq = 0;
3230 /* Set the interrupt mask for this vector. Each vector
3231 * will service 1 RSS ring and 1 or more TX completion
3232 * rings. This function sets up a bit mask per vector
3233 * that indicates which rings it services.
3235 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3237 int j, vect = ctx->intr;
3238 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3240 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3241 /* Add the RSS ring serviced by this vector
3244 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3245 /* Add the TX ring(s) serviced by this vector
3248 for (j = 0; j < tx_rings_per_vector; j++) {
3250 (1 << qdev->rx_ring[qdev->rss_ring_count +
3251 (vect * tx_rings_per_vector) + j].cq_id);
3254 /* For single vector we just shift each queue's
3257 for (j = 0; j < qdev->rx_ring_count; j++)
3258 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3263 * Here we build the intr_context structures based on
3264 * our rx_ring count and intr vector count.
3265 * The intr_context structure is used to hook each vector
3266 * to possibly different handlers.
3268 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3271 struct intr_context *intr_context = &qdev->intr_context[0];
3273 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3274 /* Each rx_ring has its
3275 * own intr_context since we have separate
3276 * vectors for each queue.
3278 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3279 qdev->rx_ring[i].irq = i;
3280 intr_context->intr = i;
3281 intr_context->qdev = qdev;
3282 /* Set up this vector's bit-mask that indicates
3283 * which queues it services.
3285 ql_set_irq_mask(qdev, intr_context);
3287 * We set up each vector's enable/disable/read bits so
3288 * there's no bit/mask calculations in the critical path.
3290 intr_context->intr_en_mask =
3291 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3292 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3294 intr_context->intr_dis_mask =
3295 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3296 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3298 intr_context->intr_read_mask =
3299 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3300 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3303 /* The first vector/queue handles
3304 * broadcast/multicast, fatal errors,
3305 * and firmware events. This is in addition
3306 * to normal inbound NAPI processing.
3308 intr_context->handler = qlge_isr;
3309 sprintf(intr_context->name, "%s-rx-%d",
3310 qdev->ndev->name, i);
3313 * Inbound queues handle unicast frames only.
3315 intr_context->handler = qlge_msix_rx_isr;
3316 sprintf(intr_context->name, "%s-rx-%d",
3317 qdev->ndev->name, i);
3322 * All rx_rings use the same intr_context since
3323 * there is only one vector.
3325 intr_context->intr = 0;
3326 intr_context->qdev = qdev;
3328 * We set up each vector's enable/disable/read bits so
3329 * there's no bit/mask calculations in the critical path.
3331 intr_context->intr_en_mask =
3332 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3333 intr_context->intr_dis_mask =
3334 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3335 INTR_EN_TYPE_DISABLE;
3336 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3337 /* Experience shows that when using INTx interrupts,
3338 * the device does not always auto-mask INTR_EN_EN.
3339 * Moreover, masking INTR_EN_EN manually does not
3340 * immediately prevent interrupt generation.
3342 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3344 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3346 intr_context->intr_read_mask =
3347 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3349 * Single interrupt means one handler for all rings.
3351 intr_context->handler = qlge_isr;
3352 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3353 /* Set up this vector's bit-mask that indicates
3354 * which queues it services. In this case there is
3355 * a single vector so it will service all RSS and
3356 * TX completion rings.
3358 ql_set_irq_mask(qdev, intr_context);
3360 /* Tell the TX completion rings which MSIx vector
3361 * they will be using.
3363 ql_set_tx_vect(qdev);
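/* Free every hooked IRQ (per-vector for MSI-X, otherwise the single MSI/INTx
 * line) and then tear down the interrupt mode.
 */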
3366 static void ql_free_irq(struct ql_adapter *qdev)
3369 struct intr_context *intr_context = &qdev->intr_context[0];
3371 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3372 if (intr_context->hooked) {
3373 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3374 free_irq(qdev->msi_x_entry[i].vector,
3377 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3381 ql_disable_msix(qdev);
3384 static int ql_request_irq(struct ql_adapter *qdev)
3388 struct pci_dev *pdev = qdev->pdev;
3389 struct intr_context *intr_context = &qdev->intr_context[0];
3391 ql_resolve_queues_to_irqs(qdev);
3393 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3394 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3395 status = request_irq(qdev->msi_x_entry[i].vector,
3396 intr_context->handler,
3401 netif_err(qdev, ifup, qdev->ndev,
3402 "Failed request for MSIX interrupt %d.\n",
3407 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3408 "trying msi or legacy interrupts.\n");
3409 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3410 "%s: irq = %d.\n", __func__, pdev->irq);
3411 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3412 "%s: context->name = %s.\n", __func__,
3413 intr_context->name);
3414 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3415 "%s: dev_id = 0x%p.\n", __func__,
3418 request_irq(pdev->irq, qlge_isr,
3419 test_bit(QL_MSI_ENABLED, &qdev->flags)
3422 intr_context->name, &qdev->rx_ring[0]);
3426 netif_err(qdev, ifup, qdev->ndev,
3427 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3428 intr_context->name);
3430 intr_context->hooked = 1;
3434 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
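/* Load the RSS hash keys and the 1024-entry indirection table into the chip
 * via a RICB.
 */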
3439 static int ql_start_rss(struct ql_adapter *qdev)
3441 static const u8 init_hash_seed[] = {
3442 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3443 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3444 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3445 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3446 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3448 struct ricb *ricb = &qdev->ricb;
3451 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3453 memset((void *)ricb, 0, sizeof(*ricb));
3455 ricb->base_cq = RSS_L4K;
3457 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3458 ricb->mask = cpu_to_le16((u16)(0x3ff));
3461 * Fill out the Indirection Table.
3463 for (i = 0; i < 1024; i++)
3464 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3466 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3467 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3469 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3471 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
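/* Zero all 16 routing table slots under the RT_IDX semaphore so no stale
 * frame routing rules survive.
 */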
3477 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3481 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3484 /* Clear all the entries in the routing table. */
3485 for (i = 0; i < 16; i++) {
3486 status = ql_set_routing_reg(qdev, i, 0, 0);
3488 netif_err(qdev, ifup, qdev->ndev,
3489 "Failed to init routing register for CAM packets.\n");
3493 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3497 /* Initialize the frame-to-queue routing. */
3498 static int ql_route_initialize(struct ql_adapter *qdev)
3502 /* Clear all the entries in the routing table. */
3503 status = ql_clear_routing_entries(qdev);
3507 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3511 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3512 RT_IDX_IP_CSUM_ERR, 1);
3514 netif_err(qdev, ifup, qdev->ndev,
3515 "Failed to init routing register for IP CSUM error packets.\n");
3518 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3519 RT_IDX_TU_CSUM_ERR, 1);
3521 netif_err(qdev, ifup, qdev->ndev,
3522 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3525 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3527 netif_err(qdev, ifup, qdev->ndev,
3528 "Failed to init routing register for broadcast packets.\n");
3531 /* If we have more than one inbound queue, then turn on RSS in the
3534 if (qdev->rss_ring_count > 1) {
3535 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3536 RT_IDX_RSS_MATCH, 1);
3538 netif_err(qdev, ifup, qdev->ndev,
3539 "Failed to init routing register for MATCH RSS packets.\n");
3544 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3547 netif_err(qdev, ifup, qdev->ndev,
3548 "Failed to init routing register for CAM packets.\n");
3550 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3554 int ql_cam_route_initialize(struct ql_adapter *qdev)
3558 /* Check if the link is up and use that to
3559 * determine whether we are setting or clearing
3560 * the MAC address in the CAM.
3562 set = ql_read32(qdev, STS);
3563 set &= qdev->port_link_up;
3564 status = ql_set_mac_addr(qdev, set);
3566 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3570 status = ql_route_initialize(qdev);
3572 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
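/* Hardware bring-up: program the system and receive configuration, start
 * every RX and TX ring, initialize the port and enable NAPI.
 */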
3577 static int ql_adapter_initialize(struct ql_adapter *qdev)
3584 * Set up the System register to halt on errors.
3586 value = SYS_EFE | SYS_FAE;
3588 ql_write32(qdev, SYS, mask | value);
3590 /* Set the default queue, and VLAN behavior. */
3591 value = NIC_RCV_CFG_DFQ;
3592 mask = NIC_RCV_CFG_DFQ_MASK;
3593 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3594 value |= NIC_RCV_CFG_RV;
3595 mask |= (NIC_RCV_CFG_RV << 16);
3597 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3599 /* Set the MPI interrupt to enabled. */
3600 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3602 /* Enable the function, set pagesize, enable error checking. */
3603 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3604 FSC_EC | FSC_VM_PAGE_4K;
3605 value |= SPLT_SETTING;
3607 /* Set/clear header splitting. */
3608 mask = FSC_VM_PAGESIZE_MASK |
3609 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3610 ql_write32(qdev, FSC, mask | value);
3612 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3614 /* Set RX packet routing to use the port/pci function on which the
3615 * packet arrived, in addition to the usual frame routing.
3616 * This is helpful on bonding where both interfaces can have
3617 * the same MAC address.
3619 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3620 /* Reroute all packets to our Interface.
3621 * They may have been routed to MPI firmware
3624 value = ql_read32(qdev, MGMT_RCV_CFG);
3625 value &= ~MGMT_RCV_CFG_RM;
3628 /* Sticky reg needs clearing due to WOL. */
3629 ql_write32(qdev, MGMT_RCV_CFG, mask);
3630 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3632 /* Default WOL is enabled on Mezz cards */
3633 if (qdev->pdev->subsystem_device == 0x0068 ||
3634 qdev->pdev->subsystem_device == 0x0180)
3635 qdev->wol = WAKE_MAGIC;
3637 /* Start up the rx queues. */
3638 for (i = 0; i < qdev->rx_ring_count; i++) {
3639 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3641 netif_err(qdev, ifup, qdev->ndev,
3642 "Failed to start rx ring[%d].\n", i);
3647 /* If there is more than one inbound completion queue
3648 * then download a RICB to configure RSS.
3650 if (qdev->rss_ring_count > 1) {
3651 status = ql_start_rss(qdev);
3653 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3658 /* Start up the tx queues. */
3659 for (i = 0; i < qdev->tx_ring_count; i++) {
3660 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to start tx ring[%d].\n", i);
3668 /* Initialize the port and set the max framesize. */
3669 status = qdev->nic_ops->port_initialize(qdev);
3671 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3673 /* Set up the MAC address and frame routing filter. */
3674 status = ql_cam_route_initialize(qdev);
3676 netif_err(qdev, ifup, qdev->ndev,
3677 "Failed to init CAM/Routing tables.\n");
3681 /* Start NAPI for the RSS queues. */
3682 for (i = 0; i < qdev->rss_ring_count; i++)
3683 napi_enable(&qdev->rx_ring[i].napi);
3688 /* Issue soft reset to chip. */
3689 static int ql_adapter_reset(struct ql_adapter *qdev)
3693 unsigned long end_jiffies;
3695 /* Clear all the entries in the routing table. */
3696 status = ql_clear_routing_entries(qdev);
3698 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3702 /* If the recovery bit is set, skip the mailbox command and
3703 * clear the bit; otherwise this is a normal reset.
3705 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3706 /* Stop management traffic. */
3707 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3709 /* Wait for the NIC and MGMNT FIFOs to empty. */
3710 ql_wait_fifo_empty(qdev);
3712 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3715 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3717 end_jiffies = jiffies + usecs_to_jiffies(30);
3719 value = ql_read32(qdev, RST_FO);
3720 if ((value & RST_FO_FR) == 0)
3723 } while (time_before(jiffies, end_jiffies));
3725 if (value & RST_FO_FR) {
3726 netif_err(qdev, ifdown, qdev->ndev,
3727 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3728 status = -ETIMEDOUT;
3731 /* Resume management traffic. */
3732 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3736 static void ql_display_dev_info(struct net_device *ndev)
3738 struct ql_adapter *qdev = netdev_priv(ndev);
3740 netif_info(qdev, probe, qdev->ndev,
3741 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3744 qdev->chip_rev_id & 0x0000000f,
3745 qdev->chip_rev_id >> 4 & 0x0000000f,
3746 qdev->chip_rev_id >> 8 & 0x0000000f,
3747 qdev->chip_rev_id >> 12 & 0x0000000f);
3748 netif_info(qdev, probe, qdev->ndev,
3749 "MAC address %pM\n", ndev->dev_addr);
3752 static int ql_wol(struct ql_adapter *qdev)
3755 u32 wol = MB_WOL_DISABLE;
3757 /* The CAM is still intact after a reset, but if we
3758 * are doing WOL, then we may need to program the
3759 * routing regs. We would also need to issue the mailbox
3760 * commands to instruct the MPI what to do per the ethtool settings.
3764 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3765 WAKE_MCAST | WAKE_BCAST)) {
3766 netif_err(qdev, ifdown, qdev->ndev,
3767 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3772 if (qdev->wol & WAKE_MAGIC) {
3773 status = ql_mb_wol_set_magic(qdev, 1);
3775 netif_err(qdev, ifdown, qdev->ndev,
3776 "Failed to set magic packet on %s.\n",
3780 netif_info(qdev, drv, qdev->ndev,
3781 "Enabled magic packet successfully on %s.\n",
3784 wol |= MB_WOL_MAGIC_PKT;
3788 wol |= MB_WOL_MODE_ON;
3789 status = ql_mb_wol_mode(qdev, wol);
3790 netif_err(qdev, drv, qdev->ndev,
3791 "WOL %s (wol code 0x%x) on %s\n",
3792 (status == 0) ? "Successfully set" : "Failed",
3793 wol, qdev->ndev->name);
3799 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3802 /* Don't kill the reset worker thread if we
3803 * are in the process of recovery.
3805 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3806 cancel_delayed_work_sync(&qdev->asic_reset_work);
3807 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3808 cancel_delayed_work_sync(&qdev->mpi_work);
3809 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3810 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3811 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
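/* Bring the adapter down: stop NAPI and interrupts, clean the TX rings,
 * reset the chip and free the RX buffers.
 */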
3814 static int ql_adapter_down(struct ql_adapter *qdev)
3820 ql_cancel_all_work_sync(qdev);
3822 for (i = 0; i < qdev->rss_ring_count; i++)
3823 napi_disable(&qdev->rx_ring[i].napi);
3825 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3827 ql_disable_interrupts(qdev);
3829 ql_tx_ring_clean(qdev);
3831 /* Call netif_napi_del() from common point.
3833 for (i = 0; i < qdev->rss_ring_count; i++)
3834 netif_napi_del(&qdev->rx_ring[i].napi);
3836 status = ql_adapter_reset(qdev);
3838 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3840 ql_free_rx_buffers(qdev);
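/* Bring the adapter up: initialize the hardware, refill RX buffers, restore
 * the rx mode and VLAN state, then start the TX queues.
 */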
3845 static int ql_adapter_up(struct ql_adapter *qdev)
3849 err = ql_adapter_initialize(qdev);
3851 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3854 set_bit(QL_ADAPTER_UP, &qdev->flags);
3855 ql_alloc_rx_buffers(qdev);
3856 /* If the port is initialized and the
3857 * link is up then turn on the carrier.
3859 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3860 (ql_read32(qdev, STS) & qdev->port_link_up))
3862 /* Restore rx mode. */
3863 clear_bit(QL_ALLMULTI, &qdev->flags);
3864 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3865 qlge_set_multicast_list(qdev->ndev);
3867 /* Restore vlan setting. */
3868 qlge_restore_vlan(qdev);
3870 ql_enable_interrupts(qdev);
3871 ql_enable_all_completion_interrupts(qdev);
3872 netif_tx_start_all_queues(qdev->ndev);
3876 ql_adapter_reset(qdev);
3880 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3882 ql_free_mem_resources(qdev);
3886 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3888 if (ql_alloc_mem_resources(qdev)) {
3889 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3892 return ql_request_irq(qdev);
3895 static int qlge_close(struct net_device *ndev)
3897 struct ql_adapter *qdev = netdev_priv(ndev);
3900 /* If we hit the pci_channel_io_perm_failure
3901 * condition, then we already
3902 * brought the adapter down.
3904 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3905 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3906 clear_bit(QL_EEH_FATAL, &qdev->flags);
3911 * Wait for device to recover from a reset.
3912 * (Rarely happens, but possible.)
3914 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3917 /* Make sure refill_work doesn't re-enable napi */
3918 for (i = 0; i < qdev->rss_ring_count; i++)
3919 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3921 ql_adapter_down(qdev);
3922 ql_release_adapter_resources(qdev);
3926 static void qlge_set_lb_size(struct ql_adapter *qdev)
3928 if (qdev->ndev->mtu <= 1500)
3929 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3931 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3932 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
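/* Size the ring layout: one RSS ring per interrupt vector actually granted,
 * plus one TX ring (with its own completion ring) per CPU.
 */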
3935 static int ql_configure_rings(struct ql_adapter *qdev)
3938 struct rx_ring *rx_ring;
3939 struct tx_ring *tx_ring;
3940 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3942 /* In a perfect world we have one RSS ring for each CPU
3943 * and each has its own vector. To do that we ask for
3944 * cpu_cnt vectors. ql_enable_msix() will adjust the
3945 * vector count to what we actually get. We then
3946 * allocate an RSS ring for each.
3947 * Essentially, we are doing min(cpu_count, msix_vector_count).
3949 qdev->intr_count = cpu_cnt;
3950 ql_enable_msix(qdev);
3951 /* Adjust the RSS ring count to the actual vector count. */
3952 qdev->rss_ring_count = qdev->intr_count;
3953 qdev->tx_ring_count = cpu_cnt;
3954 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3956 for (i = 0; i < qdev->tx_ring_count; i++) {
3957 tx_ring = &qdev->tx_ring[i];
3958 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3959 tx_ring->qdev = qdev;
3961 tx_ring->wq_len = qdev->tx_ring_size;
3963 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3966 * The completion queue IDs for the tx rings start
3967 * immediately after the rss rings.
3969 tx_ring->cq_id = qdev->rss_ring_count + i;
3972 for (i = 0; i < qdev->rx_ring_count; i++) {
3973 rx_ring = &qdev->rx_ring[i];
3974 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3975 rx_ring->qdev = qdev;
3977 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3978 if (i < qdev->rss_ring_count) {
3980 * Inbound (RSS) queues.
3982 rx_ring->cq_len = qdev->rx_ring_size;
3984 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3985 rx_ring->lbq.type = QLGE_LB;
3986 rx_ring->sbq.type = QLGE_SB;
3987 INIT_DELAYED_WORK(&rx_ring->refill_work,
3991 * Outbound queue handles outbound completions only.
3993 /* outbound cq is same size as tx_ring it services. */
3994 rx_ring->cq_len = qdev->tx_ring_size;
3996 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4002 static int qlge_open(struct net_device *ndev)
4005 struct ql_adapter *qdev = netdev_priv(ndev);
4007 err = ql_adapter_reset(qdev);
4011 qlge_set_lb_size(qdev);
4012 err = ql_configure_rings(qdev);
4016 err = ql_get_adapter_resources(qdev);
4020 err = ql_adapter_up(qdev);
4027 ql_release_adapter_resources(qdev);
4031 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4035 /* Wait for an outstanding reset to complete. */
4036 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4039 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4040 netif_err(qdev, ifup, qdev->ndev,
4041 "Waiting for adapter UP...\n");
4046 netif_err(qdev, ifup, qdev->ndev,
4047 "Timed out waiting for adapter UP\n");
4052 status = ql_adapter_down(qdev);
4056 qlge_set_lb_size(qdev);
4058 status = ql_adapter_up(qdev);
4064 netif_alert(qdev, ifup, qdev->ndev,
4065 "Driver up/down cycle failed, closing device.\n");
4066 set_bit(QL_ADAPTER_UP, &qdev->flags);
4067 dev_close(qdev->ndev);
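/* Switching between the 1500 and 9000 byte MTUs changes the large buffer
 * size, which requires a down/up cycle while the device is running.
 */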
4071 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4073 struct ql_adapter *qdev = netdev_priv(ndev);
4076 if (ndev->mtu == 1500 && new_mtu == 9000)
4077 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4078 else if (ndev->mtu == 9000 && new_mtu == 1500)
4079 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4083 queue_delayed_work(qdev->workqueue,
4084 &qdev->mpi_port_cfg_work, 3 * HZ);
4086 ndev->mtu = new_mtu;
4088 if (!netif_running(qdev->ndev))
4091 status = ql_change_rx_buffers(qdev);
4093 netif_err(qdev, ifup, qdev->ndev,
4094 "Changing MTU failed.\n");
4100 static struct net_device_stats *qlge_get_stats(struct net_device
4103 struct ql_adapter *qdev = netdev_priv(ndev);
4104 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4105 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4106 unsigned long pkts, mcast, dropped, errors, bytes;
4110 pkts = mcast = dropped = errors = bytes = 0;
4111 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4112 pkts += rx_ring->rx_packets;
4113 bytes += rx_ring->rx_bytes;
4114 dropped += rx_ring->rx_dropped;
4115 errors += rx_ring->rx_errors;
4116 mcast += rx_ring->rx_multicast;
4118 ndev->stats.rx_packets = pkts;
4119 ndev->stats.rx_bytes = bytes;
4120 ndev->stats.rx_dropped = dropped;
4121 ndev->stats.rx_errors = errors;
4122 ndev->stats.multicast = mcast;
4125 pkts = errors = bytes = 0;
4126 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4127 pkts += tx_ring->tx_packets;
4128 bytes += tx_ring->tx_bytes;
4129 errors += tx_ring->tx_errors;
4131 ndev->stats.tx_packets = pkts;
4132 ndev->stats.tx_bytes = bytes;
4133 ndev->stats.tx_errors = errors;
4134 return &ndev->stats;
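/* Update the promiscuous, all-multicast and multicast-match routing to track
 * the current rx mode and multicast list.
 */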
4137 static void qlge_set_multicast_list(struct net_device *ndev)
4139 struct ql_adapter *qdev = netdev_priv(ndev);
4140 struct netdev_hw_addr *ha;
4143 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4147 * Set or clear promiscuous mode if a
4148 * transition is taking place.
4150 if (ndev->flags & IFF_PROMISC) {
4151 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4152 if (ql_set_routing_reg
4153 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4154 netif_err(qdev, hw, qdev->ndev,
4155 "Failed to set promiscuous mode.\n");
4157 set_bit(QL_PROMISCUOUS, &qdev->flags);
4161 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4162 if (ql_set_routing_reg
4163 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4164 netif_err(qdev, hw, qdev->ndev,
4165 "Failed to clear promiscuous mode.\n");
4167 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4173 * Set or clear all multicast mode if a
4174 * transition is taking place.
4176 if ((ndev->flags & IFF_ALLMULTI) ||
4177 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4178 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4179 if (ql_set_routing_reg
4180 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4181 netif_err(qdev, hw, qdev->ndev,
4182 "Failed to set all-multi mode.\n");
4184 set_bit(QL_ALLMULTI, &qdev->flags);
4188 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4189 if (ql_set_routing_reg
4190 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4191 netif_err(qdev, hw, qdev->ndev,
4192 "Failed to clear all-multi mode.\n");
4194 clear_bit(QL_ALLMULTI, &qdev->flags);
4199 if (!netdev_mc_empty(ndev)) {
4200 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4204 netdev_for_each_mc_addr(ha, ndev) {
4205 if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4206 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4207 netif_err(qdev, hw, qdev->ndev,
4208 "Failed to loadmulticast address.\n");
4209 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4214 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4215 if (ql_set_routing_reg
4216 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4217 netif_err(qdev, hw, qdev->ndev,
4218 "Failed to set multicast match mode.\n");
4220 set_bit(QL_ALLMULTI, &qdev->flags);
4224 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
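/* Set a new MAC address in both the netdev and the CAM, under the MAC_ADDR
 * hardware semaphore.
 */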
4227 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4229 struct ql_adapter *qdev = netdev_priv(ndev);
4230 struct sockaddr *addr = p;
4233 if (!is_valid_ether_addr(addr->sa_data))
4234 return -EADDRNOTAVAIL;
4235 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4236 /* Update local copy of current mac address. */
4237 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4239 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4242 status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4243 MAC_ADDR_TYPE_CAM_MAC,
4244 qdev->func * MAX_CQ);
4246 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4247 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4251 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4253 struct ql_adapter *qdev = netdev_priv(ndev);
4255 ql_queue_asic_error(qdev);
4258 static void ql_asic_reset_work(struct work_struct *work)
4260 struct ql_adapter *qdev =
4261 container_of(work, struct ql_adapter, asic_reset_work.work);
4265 status = ql_adapter_down(qdev);
4269 status = ql_adapter_up(qdev);
4273 /* Restore rx mode. */
4274 clear_bit(QL_ALLMULTI, &qdev->flags);
4275 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4276 qlge_set_multicast_list(qdev->ndev);
4281 netif_alert(qdev, ifup, qdev->ndev,
4282 "Driver up/down cycle failed, closing device\n");
4284 set_bit(QL_ADAPTER_UP, &qdev->flags);
4285 dev_close(qdev->ndev);
4289 static const struct nic_operations qla8012_nic_ops = {
4290 .get_flash = ql_get_8012_flash_params,
4291 .port_initialize = ql_8012_port_initialize,
4294 static const struct nic_operations qla8000_nic_ops = {
4295 .get_flash = ql_get_8000_flash_params,
4296 .port_initialize = ql_8000_port_initialize,
4299 /* Find the pcie function number for the other NIC
4300 * on this chip. Since both NIC functions share a
4301 * common firmware we have the lowest enabled function
4302 * do any common work. Examples would be resetting
4303 * after a fatal firmware error, or doing a firmware coredump.
4306 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4310 u32 nic_func1, nic_func2;
4312 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4317 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4318 MPI_TEST_NIC_FUNC_MASK);
4319 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4320 MPI_TEST_NIC_FUNC_MASK);
4322 if (qdev->func == nic_func1)
4323 qdev->alt_func = nic_func2;
4324 else if (qdev->func == nic_func2)
4325 qdev->alt_func = nic_func1;
4332 static int ql_get_board_info(struct ql_adapter *qdev)
4337 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4341 status = ql_get_alt_pcie_func(qdev);
4345 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4347 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4348 qdev->port_link_up = STS_PL1;
4349 qdev->port_init = STS_PI1;
4350 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4351 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4353 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4354 qdev->port_link_up = STS_PL0;
4355 qdev->port_init = STS_PI0;
4356 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4357 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4359 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4360 qdev->device_id = qdev->pdev->device;
4361 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4362 qdev->nic_ops = &qla8012_nic_ops;
4363 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4364 qdev->nic_ops = &qla8000_nic_ops;
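/* Release everything acquired in ql_init_device(): workqueue, register and
 * doorbell mappings, coredump buffer and PCI regions.
 */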
4368 static void ql_release_all(struct pci_dev *pdev)
4370 struct net_device *ndev = pci_get_drvdata(pdev);
4371 struct ql_adapter *qdev = netdev_priv(ndev);
4373 if (qdev->workqueue) {
4374 destroy_workqueue(qdev->workqueue);
4375 qdev->workqueue = NULL;
4379 iounmap(qdev->reg_base);
4380 if (qdev->doorbell_area)
4381 iounmap(qdev->doorbell_area);
4382 vfree(qdev->mpi_coredump);
4383 pci_release_regions(pdev);
4386 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4389 struct ql_adapter *qdev = netdev_priv(ndev);
4392 memset((void *)qdev, 0, sizeof(*qdev));
4393 err = pci_enable_device(pdev);
4395 dev_err(&pdev->dev, "PCI device enable failed.\n");
4401 pci_set_drvdata(pdev, ndev);
4403 /* Set PCIe read request size */
4404 err = pcie_set_readrq(pdev, 4096);
4406 dev_err(&pdev->dev, "Set readrq failed.\n");
4410 err = pci_request_regions(pdev, DRV_NAME);
4412 dev_err(&pdev->dev, "PCI region request failed.\n");
4416 pci_set_master(pdev);
4417 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4418 set_bit(QL_DMA64, &qdev->flags);
4419 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4421 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4423 err = dma_set_coherent_mask(&pdev->dev,
4428 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4432 /* Set PCIe reset type for EEH to fundamental. */
4433 pdev->needs_freset = 1;
4434 pci_save_state(pdev);
4436 ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4437 if (!qdev->reg_base) {
4438 dev_err(&pdev->dev, "Register mapping failed.\n");
4443 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4444 qdev->doorbell_area =
4445 ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4446 if (!qdev->doorbell_area) {
4447 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4452 err = ql_get_board_info(qdev);
4454 dev_err(&pdev->dev, "Register access failed.\n");
4458 qdev->msg_enable = netif_msg_init(debug, default_msg);
4459 spin_lock_init(&qdev->stats_lock);
4461 if (qlge_mpi_coredump) {
4462 qdev->mpi_coredump =
4463 vmalloc(sizeof(struct ql_mpi_coredump));
4464 if (!qdev->mpi_coredump) {
4468 if (qlge_force_coredump)
4469 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4471 /* make sure the EEPROM is good */
4472 err = qdev->nic_ops->get_flash(qdev);
4474 dev_err(&pdev->dev, "Invalid FLASH.\n");
4478 /* Keep local copy of current mac address. */
4479 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4481 /* Set up the default ring sizes. */
4482 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4483 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4485 /* Set up the coalescing parameters. */
4486 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4487 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4488 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4489 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4492 * Set up the operating parameters.
4494 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, ndev->name);
4496 if (!qdev->workqueue) {
4501 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4502 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4503 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4504 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4505 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4506 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4507 init_completion(&qdev->ide_completion);
4508 mutex_init(&qdev->mpi_mutex);
4511 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4512 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4513 DRV_NAME, DRV_VERSION);
4517 ql_release_all(pdev);
4519 pci_disable_device(pdev);
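/* net_device callbacks for a qlge interface: open/close, transmit, MTU and
 * MAC changes, multicast filtering, timeouts, feature toggles and VLAN
 * filter maintenance.
 */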
4523 static const struct net_device_ops qlge_netdev_ops = {
4524 .ndo_open = qlge_open,
4525 .ndo_stop = qlge_close,
4526 .ndo_start_xmit = qlge_send,
4527 .ndo_change_mtu = qlge_change_mtu,
4528 .ndo_get_stats = qlge_get_stats,
4529 .ndo_set_rx_mode = qlge_set_multicast_list,
4530 .ndo_set_mac_address = qlge_set_mac_address,
4531 .ndo_validate_addr = eth_validate_addr,
4532 .ndo_tx_timeout = qlge_tx_timeout,
4533 .ndo_set_features = qlge_set_features,
4534 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4535 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
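/* Deferrable watchdog: re-read the status register every five seconds so a
 * dead PCI bus is noticed (and EEH triggered) even when the link is idle.
 */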
4538 static void ql_timer(struct timer_list *t)
4540 struct ql_adapter *qdev = from_timer(qdev, t, timer);
4543 var = ql_read32(qdev, STS);
4544 if (pci_channel_offline(qdev->pdev)) {
4545 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4549 mod_timer(&qdev->timer, jiffies + (5 * HZ));
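/* PCI probe: allocate the multi-queue net_device, initialize the adapter via
 * ql_init_device(), advertise offload features, register with the stack and
 * start the EEH watchdog timer.
 */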
4552 static int qlge_probe(struct pci_dev *pdev,
4553 const struct pci_device_id *pci_entry)
4555 struct net_device *ndev = NULL;
4556 struct ql_adapter *qdev = NULL;
4557 static int cards_found;
4560 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4562 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4566 err = ql_init_device(pdev, ndev, cards_found);
4572 qdev = netdev_priv(ndev);
4573 SET_NETDEV_DEV(ndev, &pdev->dev);
4574 ndev->hw_features = NETIF_F_SG |
4578 NETIF_F_HW_VLAN_CTAG_TX |
4579 NETIF_F_HW_VLAN_CTAG_RX |
4580 NETIF_F_HW_VLAN_CTAG_FILTER |
4582 ndev->features = ndev->hw_features;
4583 ndev->vlan_features = ndev->hw_features;
4584 /* vlan gets same features (except vlan filter) */
4585 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4586 NETIF_F_HW_VLAN_CTAG_TX |
4587 NETIF_F_HW_VLAN_CTAG_RX);
4589 if (test_bit(QL_DMA64, &qdev->flags))
4590 ndev->features |= NETIF_F_HIGHDMA;
4593 * Set up net_device structure.
4595 ndev->tx_queue_len = qdev->tx_ring_size;
4596 ndev->irq = pdev->irq;
4598 ndev->netdev_ops = &qlge_netdev_ops;
4599 ndev->ethtool_ops = &qlge_ethtool_ops;
4600 ndev->watchdog_timeo = 10 * HZ;
4602 /* MTU range: this driver only supports 1500 or 9000, so this only
4603 * filters out values above or below, and we'll rely on
4604 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4606 ndev->min_mtu = ETH_DATA_LEN;
4607 ndev->max_mtu = 9000;
4609 err = register_netdev(ndev);
4611 dev_err(&pdev->dev, "net device registration failed.\n");
4612 ql_release_all(pdev);
4613 pci_disable_device(pdev);
4617 /* Start up the timer to trigger EEH if the bus goes dead. */
4620 timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4621 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4623 ql_display_dev_info(ndev);
4624 atomic_set(&qdev->lb_count, 0);
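/* Thin wrappers exported for the ethtool loopback self-test. */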
4629 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4631 return qlge_send(skb, ndev);
4634 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4636 return ql_clean_inbound_rx_ring(rx_ring, budget);
4639 static void qlge_remove(struct pci_dev *pdev)
4641 struct net_device *ndev = pci_get_drvdata(pdev);
4642 struct ql_adapter *qdev = netdev_priv(ndev);
4644 del_timer_sync(&qdev->timer);
4645 ql_cancel_all_work_sync(qdev);
4646 unregister_netdev(ndev);
4647 ql_release_all(pdev);
4648 pci_disable_device(pdev);
4652 /* Clean up resources without touching hardware. */
4653 static void ql_eeh_close(struct net_device *ndev)
4656 struct ql_adapter *qdev = netdev_priv(ndev);
4658 if (netif_carrier_ok(ndev)) {
4659 netif_carrier_off(ndev);
4660 netif_stop_queue(ndev);
4663 /* Disabling the timer */
4664 ql_cancel_all_work_sync(qdev);
4666 for (i = 0; i < qdev->rss_ring_count; i++)
4667 netif_napi_del(&qdev->rx_ring[i].napi);
4669 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4670 ql_tx_ring_clean(qdev);
4671 ql_free_rx_buffers(qdev);
4672 ql_release_adapter_resources(qdev);
4676 * This callback is called by the PCI subsystem whenever
4677 * a PCI bus error is detected.
4679 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4680 pci_channel_state_t state)
4682 struct net_device *ndev = pci_get_drvdata(pdev);
4683 struct ql_adapter *qdev = netdev_priv(ndev);
4686 case pci_channel_io_normal:
4687 return PCI_ERS_RESULT_CAN_RECOVER;
4688 case pci_channel_io_frozen:
4689 netif_device_detach(ndev);
4690 del_timer_sync(&qdev->timer);
4691 if (netif_running(ndev))
4692 ql_eeh_close(ndev);
4693 pci_disable_device(pdev);
4694 return PCI_ERS_RESULT_NEED_RESET;
4695 case pci_channel_io_perm_failure:
4697 "%s: pci_channel_io_perm_failure.\n", __func__);
4698 del_timer_sync(&qdev->timer);
4700 set_bit(QL_EEH_FATAL, &qdev->flags);
4701 return PCI_ERS_RESULT_DISCONNECT;
4704 /* Request a slot reset. */
4705 return PCI_ERS_RESULT_NEED_RESET;
4709 * This callback is called after the PCI bus has been reset.
4710 * Basically, this tries to restart the card from scratch.
4711 * This is a shortened version of the device probe/discovery code,
4712 * it resembles the first half of the qlge_probe() routine.
4714 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4716 struct net_device *ndev = pci_get_drvdata(pdev);
4717 struct ql_adapter *qdev = netdev_priv(ndev);
4719 pdev->error_state = pci_channel_io_normal;
4721 pci_restore_state(pdev);
4722 if (pci_enable_device(pdev)) {
4723 netif_err(qdev, ifup, qdev->ndev,
4724 "Cannot re-enable PCI device after reset.\n");
4725 return PCI_ERS_RESULT_DISCONNECT;
4727 pci_set_master(pdev);
4729 if (ql_adapter_reset(qdev)) {
4730 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4731 set_bit(QL_EEH_FATAL, &qdev->flags);
4732 return PCI_ERS_RESULT_DISCONNECT;
4735 return PCI_ERS_RESULT_RECOVERED;
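/* Final stage of EEH recovery: reopen the interface if it was running,
 * restart the watchdog timer and re-attach the device.
 */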
4738 static void qlge_io_resume(struct pci_dev *pdev)
4740 struct net_device *ndev = pci_get_drvdata(pdev);
4741 struct ql_adapter *qdev = netdev_priv(ndev);
4744 if (netif_running(ndev)) {
4745 err = qlge_open(ndev);
4747 netif_err(qdev, ifup, qdev->ndev,
4748 "Device initialization failed after reset.\n");
4752 netif_err(qdev, ifup, qdev->ndev,
4753 "Device was not running prior to EEH.\n");
4755 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4756 netif_device_attach(ndev);
4759 static const struct pci_error_handlers qlge_err_handler = {
4760 .error_detected = qlge_io_error_detected,
4761 .slot_reset = qlge_io_slot_reset,
4762 .resume = qlge_io_resume,
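/* Power-management hooks: detach and quiesce the adapter on suspend, bring
 * it back up and re-arm the watchdog on resume.
 */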
4765 static int __maybe_unused qlge_suspend(struct device *dev_d)
4767 struct net_device *ndev = dev_get_drvdata(dev_d);
4768 struct ql_adapter *qdev = netdev_priv(ndev);
4771 netif_device_detach(ndev);
4772 del_timer_sync(&qdev->timer);
4774 if (netif_running(ndev)) {
4775 err = ql_adapter_down(qdev);
4785 static int __maybe_unused qlge_resume(struct device *dev_d)
4787 struct net_device *ndev = dev_get_drvdata(dev_d);
4788 struct ql_adapter *qdev = netdev_priv(ndev);
4791 pci_set_master(to_pci_dev(dev_d));
4793 device_wakeup_disable(dev_d);
4795 if (netif_running(ndev)) {
4796 err = ql_adapter_up(qdev);
4801 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4802 netif_device_attach(ndev);
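/* Shutdown reuses the suspend path to quiesce the adapter. */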
4807 static void qlge_shutdown(struct pci_dev *pdev)
4809 qlge_suspend(&pdev->dev);
4812 static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
4814 static struct pci_driver qlge_driver = {
4816 .id_table = qlge_pci_tbl,
4817 .probe = qlge_probe,
4818 .remove = qlge_remove,
4819 .driver.pm = &qlge_pm_ops,
4820 .shutdown = qlge_shutdown,
4821 .err_handler = &qlge_err_handler
4824 module_pci_driver(qlge_driver);