// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
#include "qlge_devlink.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR |
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. Default is OFF - do not allow.");

static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int qlge_wol(struct qlge_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int qlge_adapter_down(struct qlge_adapter *);
static int qlge_adapter_up(struct qlge_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	qlge_write32(qdev, SEM, sem_bits | sem_mask);
	return !(qlge_read32(qdev, SEM) & sem_bits);
}

int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!qlge_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
{
	qlge_write32(qdev, SEM, sem_mask);
	qlge_read32(qdev, SEM);	/* flush */
}

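/*
 * Usage sketch (illustrative; mirrors how callers in this file pair the
 * calls): a shared hardware resource is bracketed by lock/unlock, e.g.
 *
 *	if (qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
 *		return -ETIMEDOUT;
 *	...touch the MAC address CAM...
 *	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 *
 * qlge_sem_trylock() is the non-blocking variant; qlge_sem_spinlock()
 * retries it up to 30 times with a 100us delay before giving up.
 */
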
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = qlge_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit) {
			return 0;
		}
		udelay(UDELAY_DELAY);
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
{
	int count;
	u32 temp;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = qlge_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
		   u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
	if (dma_mapping_error(&qdev->pdev->dev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = qlge_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	qlge_write32(qdev, ICB_L, (u32)map);
	qlge_write32(qdev, ICB_H, (u32)(map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	qlge_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = qlge_wait_cfg(qdev, bit);
exit:
	qlge_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	dma_unmap_single(&qdev->pdev->dev, map, size, direction);
	return status;
}

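/*
 * Note on the CFG write above: mask = CFG_Q_MASK | (bit << 16) appears to use
 * the same masked-write convention seen elsewhere on this chip (e.g. the
 * INTR_EN writes below), where the upper 16 bits select which low-order bits
 * the write may modify. This is an observation drawn from the surrounding
 * code, not from a datasheet.
 */
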
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
			  u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC: {
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     MAC_ADDR_ADR | MAC_ADDR_RS |
			     type); /* type */
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			break;
		*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     MAC_ADDR_ADR | MAC_ADDR_RS |
			     type); /* type */
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			break;
		*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						   MAC_ADDR_MW, 0);
			if (status)
				break;
			qlge_write32(qdev, MAC_ADDR_IDX,
				     (offset++) | /* offset */
				     (index
				      << MAC_ADDR_IDX_SHIFT) | /* index */
				     MAC_ADDR_ADR |
				     MAC_ADDR_RS | type); /* type */
			status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						   MAC_ADDR_MR, 0);
			if (status)
				break;
			*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type,
				 u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC: {
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			addr[5];

		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
			     MAC_ADDR_E);
		qlge_write32(qdev, MAC_ADDR_DATA, lower);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
			     MAC_ADDR_E);

		qlge_write32(qdev, MAC_ADDR_DATA, upper);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC: {
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			addr[5];

		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type); /* type */
		qlge_write32(qdev, MAC_ADDR_DATA, lower);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type); /* type */
		qlge_write32(qdev, MAC_ADDR_DATA, upper);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type); /* type */
		/* This field should also include the queue id
		 * and possibly the function id. Right now we hardcode
		 * the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN: {
		u32 enable_bit = *((u32 *)&addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     offset | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type | /* type */
			     enable_bit); /* enable/disable */
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = qlge_set_mac_addr_reg(qdev, (u8 *)addr,
				       MAC_ADDR_TYPE_CAM_MAC,
				       qdev->func * MAX_CQ);
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void qlge_link_on(struct qlge_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	qlge_set_mac_addr(qdev, 1);
}

void qlge_link_off(struct qlge_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	qlge_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	qlge_write32(qdev, RT_IDX,
		     RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = qlge_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes. Each one can be used
 * to route different frame types to various inbound queues. We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
				int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		value = RT_IDX_DST_CAM_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_VALID: /* Promiscuous Mode frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_IP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_MCAST: /* Pass up All Multicast frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
		value = RT_IDX_DST_RSS | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	case 0: /* Clear the E-bit on an entry. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(index << RT_IDX_IDX_SHIFT); /* index */
		break;
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		goto exit;
	}

	if (value) {
		status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		qlge_write32(qdev, RT_IDX, value);
		qlge_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

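/*
 * Illustrative use (matching the slot/mask pairing used by callers of this
 * function elsewhere in the driver): to steer broadcast frames to the
 * default queue one would enable the broadcast slot, e.g.
 *
 *	status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 */
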
static void qlge_enable_interrupts(struct qlge_adapter *qdev)
{
	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void qlge_disable_interrupts(struct qlge_adapter *qdev)
{
	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
	struct intr_context *ctx = &qdev->intr_context[intr];

	qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
}

static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
	struct intr_context *ctx = &qdev->intr_context[intr];

	qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}

static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++)
		qlge_enable_completion_interrupt(qdev, i);
}

static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
{
	int status;

	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
				   FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
				   FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32. Since qlge_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[ETH_ALEN];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = qlge_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = qlge_validate_flash(qdev,
				     sizeof(struct flash_params_8000) /
				     sizeof(u16),
				     "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = qlge_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = qlge_validate_flash(qdev,
				     sizeof(struct flash_params_8012) /
				     sizeof(u16),
				     "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev, XGMAC_ADDR, XGMAC_ADDR_RDY,
				   XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	qlge_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	qlge_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev, XGMAC_ADDR, XGMAC_ADDR_RDY,
				   XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev, XGMAC_ADDR, XGMAC_ADDR_RDY,
				   XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = qlge_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = qlge_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64)lo | ((u64)hi << 32);

exit:
	return status;
}

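/*
 * Reading lo before hi assumes the hardware latches a consistent 64-bit
 * snapshot across the two 32-bit accesses (or that the counters move slowly
 * relative to back-to-back reads). The code has always made this assumption;
 * it is noted here rather than guaranteed by a spec.
 */
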
static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = qlge_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = qlge_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status)
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status = qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS,
				      MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;

	status = qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	qlge_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
	struct qlge_bq_desc *bq_desc;

	bq_desc = &bq->queue[bq->next_to_clean];
	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);

	return bq_desc;
}

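/*
 * QLGE_BQ_WRAP keeps the index inside the queue. Assuming the definition in
 * qlge.h masks with (QLGE_BQ_LEN - 1) for a power-of-two queue length, then
 * for example with a hypothetical QLGE_BQ_LEN of 1024:
 * QLGE_BQ_WRAP(1023 + 1) == 0.
 */
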
static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
						 struct rx_ring *rx_ring)
{
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);

	dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
				qdev->lbq_buf_size, DMA_FROM_DEVICE);

	if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
	    qlge_lbq_block_size(qdev)) {
		/* last chunk of the master page */
		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
			       qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
	}

	return lbq_desc;
}

/* Update an rx ring index. */
static void qlge_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void qlge_write_cq_idx(struct rx_ring *rx_ring)
{
	qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static const char * const bq_type_name[] = {
	[QLGE_SB] = "sbq",
	[QLGE_LB] = "lbq",
};

/* return 0 or negative error */
static int qlge_refill_sb(struct rx_ring *rx_ring,
			  struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct sk_buff *skb;

	if (sbq_desc->p.skb)
		return 0;

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "ring %u sbq: getting new skb for index %d.\n",
		     rx_ring->cq_id, sbq_desc->index);

	skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, QLGE_SB_PAD);

	sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
					    SMALL_BUF_MAP_SIZE,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}
	*sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);

	sbq_desc->p.skb = skb;
	return 0;
}

/* return 0 or negative error */
static int qlge_refill_lb(struct rx_ring *rx_ring,
			  struct qlge_bq_desc *lbq_desc, gfp_t gfp)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;

	if (!master_chunk->page) {
		struct page *page;
		dma_addr_t dma_addr;

		page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
		if (unlikely(!page))
			return -ENOMEM;
		dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
					qlge_lbq_block_size(qdev),
					DMA_FROM_DEVICE);
		if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
			__free_pages(page, qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -EIO;
		}
		master_chunk->page = page;
		master_chunk->va = page_address(page);
		master_chunk->offset = 0;
		rx_ring->chunk_dma_addr = dma_addr;
	}

	lbq_desc->p.pg_chunk = *master_chunk;
	lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
	*lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
					 lbq_desc->p.pg_chunk.offset);

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	master_chunk->offset += qdev->lbq_buf_size;
	if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
		master_chunk->page = NULL;
	} else {
		master_chunk->va += qdev->lbq_buf_size;
		get_page(master_chunk->page);
	}

	return 0;
}

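/*
 * Large buffers are carved out of one "master" compound page: each refill
 * hands the current chunk to the hardware, bumps the offset by lbq_buf_size
 * and takes an extra page reference, so every chunk owner can put_page()
 * independently. Once the offset reaches the block size, the page is
 * released from the master slot and a fresh one is allocated on the next
 * call.
 */
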
/* return 0 or negative error */
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct qlge_bq_desc *bq_desc;
	int refill_count;
	int retval;
	int i;

	refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
				    bq->next_to_use);
	if (!refill_count)
		return 0;

	i = bq->next_to_use;
	bq_desc = &bq->queue[i];
	i -= QLGE_BQ_LEN;
	do {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "ring %u %s: try cleaning idx %d\n",
			     rx_ring->cq_id, bq_type_name[bq->type], i);

		if (bq->type == QLGE_SB)
			retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
		else
			retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
		if (retval < 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "ring %u %s: Could not get a page chunk, idx %d\n",
				  rx_ring->cq_id, bq_type_name[bq->type], i);
			break;
		}

		bq_desc++;
		i++;
		if (unlikely(!i)) {
			bq_desc = &bq->queue[0];
			i -= QLGE_BQ_LEN;
		}
		refill_count--;
	} while (refill_count);
	i += QLGE_BQ_LEN;

	if (bq->next_to_use != i) {
		if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "ring %u %s: updating prod idx = %d.\n",
				     rx_ring->cq_id, bq_type_name[bq->type],
				     i);
			qlge_write_db_reg(i, bq->prod_idx_db_reg);
		}
		bq->next_to_use = i;
	}

	return retval;
}

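/*
 * refill_count above is the free space between next_to_use and the entry
 * just behind next_to_clean, rounded via QLGE_BQ_ALIGN; the doorbell is only
 * rung when the producer index crosses such an aligned boundary, batching
 * MMIO writes. The exact alignment granularity comes from QLGE_BQ_ALIGN in
 * qlge.h and is not restated here.
 */
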
static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
				      unsigned long delay)
{
	bool sbq_fail, lbq_fail;

	sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
	lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);

	/* Minimum number of buffers needed to be able to receive at least one
	 * frame of any format:
	 * sbq: 1 for header + 1 for data
	 * lbq: mtu 9000 / lb size
	 * Below this, the queue might stall.
	 */
	if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
	    (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
	     DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
		/* Allocations can take a long time in certain cases (ex.
		 * reclaim). Therefore, use a workqueue for long-running
		 * work items.
		 */
		queue_delayed_work_on(smp_processor_id(), system_long_wq,
				      &rx_ring->refill_work, delay);
}

static void qlge_slow_refill(struct work_struct *work)
{
	struct rx_ring *rx_ring = container_of(work, struct rx_ring,
					       refill_work.work);
	struct napi_struct *napi = &rx_ring->napi;

	napi_disable(napi);
	qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
	napi_enable(napi);

	local_bh_disable();
	/* napi_disable() might have prevented incomplete napi work from being
	 * rescheduled.
	 */
	napi_schedule(napi);
	/* trigger softirq processing */
	local_bh_enable();
}

/* Unmaps tx buffers. Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void qlge_unmap_send(struct qlge_adapter *qdev,
			    struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			dma_unmap_single(&qdev->pdev->dev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 DMA_TO_DEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), DMA_TO_DEVICE);
		}
	}
}

/* Map the buffers for this transmit. This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int qlge_map_send(struct qlge_adapter *qdev,
			 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
			 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);

	err = dma_mapping_error(&qdev->pdev->dev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB. If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
					     sizeof(struct qlge_oal),
					     DMA_TO_DEVICE);
			err = dma_mapping_error(&qdev->pdev->dev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
				cpu_to_le32((sizeof(struct tx_buf_desc) *
					     (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct qlge_oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	qlge_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

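/*
 * Every successful dma_map_* above is recorded in tx_ring_desc->map[] via
 * dma_unmap_addr_set()/dma_unmap_len_set(); that bookkeeping is what lets
 * qlge_unmap_send() unwind exactly map_idx mappings, both on the error path
 * here and later on tx completion.
 */
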
/* Categorizing receive firmware frame errors */
static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
				   struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;
	default:
		break;
	}
}

/**
 * qlge_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				    void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
					 u32 length, u16 vlan_id)
{
	struct sk_buff *skb;
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
				     struct rx_ring *rx_ring,
				     struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				     u32 length, u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length */
	qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	skb_put_data(skb, addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
				    struct rx_ring *rx_ring,
				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				    u32 length, u16 vlan_id)
{
	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb, *new_skb;

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (!new_skb) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);

	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);

	skb_put_data(new_skb, skb->data, length);

	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		qlge_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void qlge_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	memmove(skb->data, temp_addr, len);
}

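/*
 * Arithmetic sketch: with QLGE_SB_PAD == 32 (the reserve applied in
 * qlge_refill_sb()) and NET_IP_ALIGN == 2 (its usual value; it is 0 on some
 * architectures), data and tail move back by 30 bytes. That leaves 2 bytes
 * of headroom, so the IP header lands 4-byte aligned after the 14-byte
 * Ethernet header.
 */
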
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	struct qlge_bq_desc *lbq_desc, *sbq_desc;
	struct sk_buff *skb = NULL;
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		skb = sbq_desc->p.skb;
		qlge_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			dma_sync_single_for_cpu(&qdev->pdev->dev,
						sbq_desc->dma_addr,
						SMALL_BUF_MAP_SIZE,
						DMA_FROM_DEVICE);
			skb_put_data(skb, sbq_desc->p.skb->data, length);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			skb = sbq_desc->p.skb;
			qlge_realign_skb(skb, length);
			skb_put(skb, length);
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
					 SMALL_BUF_MAP_SIZE,
					 DMA_FROM_DEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (!skb) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       qdev->lbq_buf_size,
				       DMA_FROM_DEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
						lbq_desc->p.pg_chunk.va,
						&hlen);
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames. If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, i = 0;

		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		do {
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			size = min(length, qdev->lbq_buf_size);

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		} while (length > 0);
		qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
					&hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
					   struct rx_ring *rx_ring,
					   struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
					   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		qlge_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
					      struct rx_ring *rx_ring,
					      struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					       vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
					vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
					 vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					       vlan_id);
	}

	return (unsigned long)length;
}

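/*
 * Dispatch summary for the cases above: header-split frames and the final
 * catch-all go through qlge_build_rx_skb() (split path); small-buffer-only
 * data is copied into a fresh skb; checksummed TCP in a page chunk rides the
 * GRO frags path; other page-chunk frames get a new skb with the page
 * attached as a fragment.
 */
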
/* Process an outbound completion from an rx ring. */
static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
				     struct qlge_ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void qlge_queue_fw_error(struct qlge_adapter *qdev)
{
	qlge_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void qlge_queue_asic_error(struct qlge_adapter *qdev)
{
	qlge_link_off(qdev);
	qlge_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
				      struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		qlge_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		qlge_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		qlge_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		qlge_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		qlge_queue_asic_error(qdev);
		break;
	}
}

static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			qlge_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	qlge_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct qlge_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			qlge_process_mac_rx_intr(qdev, rx_ring,
						 (struct qlge_ib_mac_iocb_rsp *)
						 net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
						  net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
	qlge_write_cq_idx(rx_ring);
	return count;
}

static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			qlge_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		qlge_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

2226 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2228 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2230 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2231 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2232 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2234 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2239 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2240 * based on the given features, to enable/disable hardware VLAN acceleration
2242 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2243 netdev_features_t features)
2245 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2246 bool need_restart = netif_running(ndev);
2250 status = qlge_adapter_down(qdev);
2252 netif_err(qdev, link, qdev->ndev,
2253 "Failed to bring down the adapter\n");
2258 /* Update the features with the recent change. */
2259 ndev->features = features;
2262 status = qlge_adapter_up(qdev);
2264 netif_err(qdev, link, qdev->ndev,
2265 "Failed to bring up the adapter\n");
2273 static int qlge_set_features(struct net_device *ndev,
2274 netdev_features_t features)
2276 netdev_features_t changed = ndev->features ^ features;
2279 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2280 /* Update the behavior of vlan accel in the adapter */
2281 err = qlge_update_hw_vlan_features(ndev, features);
2285 qlge_vlan_mode(ndev, features);
2291 static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2293 u32 enable_bit = MAC_ADDR_E;
2296 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2297 MAC_ADDR_TYPE_VLAN, vid);
2299 netif_err(qdev, ifup, qdev->ndev,
2300 "Failed to init vlan address.\n");
2304 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2306 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2310 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2314 err = __qlge_vlan_rx_add_vid(qdev, vid);
2315 set_bit(vid, qdev->active_vlans);
2317 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2322 static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2327 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2328 MAC_ADDR_TYPE_VLAN, vid);
2330 netif_err(qdev, ifup, qdev->ndev,
2331 "Failed to clear vlan address.\n");
2335 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2337 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2341 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2345 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2346 clear_bit(vid, qdev->active_vlans);
2348 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2353 static void qlge_restore_vlan(struct qlge_adapter *qdev)
2358 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2362 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2363 __qlge_vlan_rx_add_vid(qdev, vid);
2365 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2368 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2369 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2371 struct rx_ring *rx_ring = dev_id;
2373 napi_schedule(&rx_ring->napi);
2377 /* This handles a fatal error, MPI activity, and the default
2378 * rx_ring in an MSI-X multiple vector environment.
2379 * In an MSI/Legacy environment it also processes the rest of the rx_rings.
2382 static irqreturn_t qlge_isr(int irq, void *dev_id)
2384 struct rx_ring *rx_ring = dev_id;
2385 struct qlge_adapter *qdev = rx_ring->qdev;
2386 struct intr_context *intr_context = &qdev->intr_context[0];
2390 /* Experience shows that when using INTx interrupts, interrupts must
2391 * be masked manually.
2392 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2393 * (even though it is auto-masked), otherwise a later command to
2394 * enable it is not effective.
2396 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2397 qlge_disable_completion_interrupt(qdev, 0);
2399 var = qlge_read32(qdev, STS);
2402 * Check for fatal error.
2405 qlge_disable_completion_interrupt(qdev, 0);
2406 qlge_queue_asic_error(qdev);
2407 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2408 var = qlge_read32(qdev, ERR_STS);
2409 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2414 * Check MPI processor activity.
2416 if ((var & STS_PI) &&
2417 (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2419 * We've got an async event or mailbox completion.
2420 * Handle it and clear the source of the interrupt.
2422 netif_err(qdev, intr, qdev->ndev,
2423 "Got MPI processor interrupt.\n");
2424 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2425 queue_delayed_work_on(smp_processor_id(),
2426 qdev->workqueue, &qdev->mpi_work, 0);
2431 * Get the bit-mask that shows the active queues for this
2432 * pass. Compare it to the queues that this irq services
2433 * and call napi if there's a match.
2435 var = qlge_read32(qdev, ISR1);
2436 if (var & intr_context->irq_mask) {
2437 netif_info(qdev, intr, qdev->ndev,
2438 "Waking handler for rx_ring[0].\n");
2439 napi_schedule(&rx_ring->napi);
2442 /* Experience shows that the device sometimes signals an
2443 * interrupt but no work is scheduled from this function.
2444 * Nevertheless, the interrupt is auto-masked. Therefore, we
2445 * systematically re-enable the interrupt if we didn't schedule NAPI.
2448 qlge_enable_completion_interrupt(qdev, 0);
2451 return work_done ? IRQ_HANDLED : IRQ_NONE;
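/* qlge_tso() builds a TSO IOCB from a GSO skb: a positive return means
 * TSO was set up, zero means the frame is not GSO, and a negative errno
 * means the headers could not be made writable; the caller in
 * qlge_send() checks for each of these cases.
 */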
2454 static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2456 if (skb_is_gso(skb)) {
2458 __be16 l3_proto = vlan_get_protocol(skb);
2460 err = skb_cow_head(skb, 0);
2464 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2465 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2466 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2467 mac_iocb_ptr->total_hdrs_len =
2468 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2469 mac_iocb_ptr->net_trans_offset =
2470 cpu_to_le16(skb_network_offset(skb) |
2471 skb_transport_offset(skb)
2472 << OB_MAC_TRANSPORT_HDR_SHIFT);
2473 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2474 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2475 if (likely(l3_proto == htons(ETH_P_IP))) {
2476 struct iphdr *iph = ip_hdr(skb);
2479 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2480 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2484 } else if (l3_proto == htons(ETH_P_IPV6)) {
2485 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2486 tcp_hdr(skb)->check =
2487 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2488 &ipv6_hdr(skb)->daddr,
2496 static void qlge_hw_csum_setup(struct sk_buff *skb,
2497 struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2500 struct iphdr *iph = ip_hdr(skb);
2503 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2504 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2505 mac_iocb_ptr->net_trans_offset =
2506 cpu_to_le16(skb_network_offset(skb) |
2507 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2509 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2510 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2511 if (likely(iph->protocol == IPPROTO_TCP)) {
2512 check = &(tcp_hdr(skb)->check);
2513 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2514 mac_iocb_ptr->total_hdrs_len =
2515 cpu_to_le16(skb_transport_offset(skb) +
2516 (tcp_hdr(skb)->doff << 2));
2518 check = &(udp_hdr(skb)->check);
2519 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2520 mac_iocb_ptr->total_hdrs_len =
2521 cpu_to_le16(skb_transport_offset(skb) +
2522 sizeof(struct udphdr));
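/* Seed the checksum field with the pseudo-header sum; the hardware
 * folds in the payload when it computes the final checksum.
 */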
2524 *check = ~csum_tcpudp_magic(iph->saddr,
2525 iph->daddr, len, iph->protocol, 0);
2528 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2530 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2531 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2532 struct tx_ring_desc *tx_ring_desc;
2534 struct tx_ring *tx_ring;
2535 u32 tx_ring_idx = (u32)skb->queue_mapping;
2537 tx_ring = &qdev->tx_ring[tx_ring_idx];
2539 if (skb_padto(skb, ETH_ZLEN))
2540 return NETDEV_TX_OK;
2542 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2543 netif_info(qdev, tx_queued, qdev->ndev,
2544 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2545 __func__, tx_ring_idx);
2546 netif_stop_subqueue(ndev, tx_ring->wq_id);
2547 tx_ring->tx_errors++;
2548 return NETDEV_TX_BUSY;
2550 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2551 mac_iocb_ptr = tx_ring_desc->queue_entry;
2552 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2554 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2555 mac_iocb_ptr->tid = tx_ring_desc->index;
2556 /* We use the upper 32-bits to store the tx queue for this IO.
2557 * When we get the completion we can use it to establish the context.
2559 mac_iocb_ptr->txq_idx = tx_ring_idx;
2560 tx_ring_desc->skb = skb;
2562 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2564 if (skb_vlan_tag_present(skb)) {
2565 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2566 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2567 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2568 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2570 tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2572 dev_kfree_skb_any(skb);
2573 return NETDEV_TX_OK;
2574 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2575 qlge_hw_csum_setup(skb,
2576 (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2578 if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2580 netif_err(qdev, tx_queued, qdev->ndev,
2581 "Could not map the segments.\n");
2582 tx_ring->tx_errors++;
2583 return NETDEV_TX_BUSY;
2586 tx_ring->prod_idx++;
2587 if (tx_ring->prod_idx == tx_ring->wq_len)
2588 tx_ring->prod_idx = 0;
2589 wmb();	/* Ensure the IOCB writes are visible before ringing the doorbell. */
2591 qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2592 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2593 "tx queued, slot %d, len %d\n",
2594 tx_ring->prod_idx, skb->len);
2596 atomic_dec(&tx_ring->tx_count);
2598 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2599 netif_stop_subqueue(ndev, tx_ring->wq_id);
2600 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2602 * The queue got stopped because the tx_ring was full.
2603 * Wake it up, because it's now at least 25% empty.
2605 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2607 return NETDEV_TX_OK;
2610 static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2612 if (qdev->rx_ring_shadow_reg_area) {
2613 dma_free_coherent(&qdev->pdev->dev,
2615 qdev->rx_ring_shadow_reg_area,
2616 qdev->rx_ring_shadow_reg_dma);
2617 qdev->rx_ring_shadow_reg_area = NULL;
2619 if (qdev->tx_ring_shadow_reg_area) {
2620 dma_free_coherent(&qdev->pdev->dev,
2622 qdev->tx_ring_shadow_reg_area,
2623 qdev->tx_ring_shadow_reg_dma);
2624 qdev->tx_ring_shadow_reg_area = NULL;
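/* Shadow registers are DMA-coherent words the chip updates with its
 * producer/consumer indices, letting the driver poll ring state from
 * memory instead of issuing MMIO reads.
 */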
2628 static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2630 qdev->rx_ring_shadow_reg_area =
2631 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2632 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2633 if (!qdev->rx_ring_shadow_reg_area) {
2634 netif_err(qdev, ifup, qdev->ndev,
2635 "Allocation of RX shadow space failed.\n");
2639 qdev->tx_ring_shadow_reg_area =
2640 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2641 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2642 if (!qdev->tx_ring_shadow_reg_area) {
2643 netif_err(qdev, ifup, qdev->ndev,
2644 "Allocation of TX shadow space failed.\n");
2645 goto err_wqp_sh_area;
2650 dma_free_coherent(&qdev->pdev->dev,
2652 qdev->rx_ring_shadow_reg_area,
2653 qdev->rx_ring_shadow_reg_dma);
2657 static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2659 struct tx_ring_desc *tx_ring_desc;
2661 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2663 mac_iocb_ptr = tx_ring->wq_base;
2664 tx_ring_desc = tx_ring->q;
2665 for (i = 0; i < tx_ring->wq_len; i++) {
2666 tx_ring_desc->index = i;
2667 tx_ring_desc->skb = NULL;
2668 tx_ring_desc->queue_entry = mac_iocb_ptr;
2672 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2675 static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2676 struct tx_ring *tx_ring)
2678 if (tx_ring->wq_base) {
2679 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2680 tx_ring->wq_base, tx_ring->wq_base_dma);
2681 tx_ring->wq_base = NULL;
2687 static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2688 struct tx_ring *tx_ring)
2691 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2692 &tx_ring->wq_base_dma, GFP_ATOMIC);
2694 if (!tx_ring->wq_base ||
2695 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2699 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2706 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2707 tx_ring->wq_base, tx_ring->wq_base_dma);
2708 tx_ring->wq_base = NULL;
2710 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2714 static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2716 struct qlge_bq *lbq = &rx_ring->lbq;
2717 unsigned int last_offset;
2719 last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
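/* Only the chunk at the last offset within a block owns the DMA
 * mapping for the whole block, so unmap only when freeing that chunk.
 */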
2720 while (lbq->next_to_clean != lbq->next_to_use) {
2721 struct qlge_bq_desc *lbq_desc =
2722 &lbq->queue[lbq->next_to_clean];
2724 if (lbq_desc->p.pg_chunk.offset == last_offset)
2725 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2726 qlge_lbq_block_size(qdev),
2728 put_page(lbq_desc->p.pg_chunk.page);
2730 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2733 if (rx_ring->master_chunk.page) {
2734 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2735 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2736 put_page(rx_ring->master_chunk.page);
2737 rx_ring->master_chunk.page = NULL;
2741 static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2745 for (i = 0; i < QLGE_BQ_LEN; i++) {
2746 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2749 netif_err(qdev, ifup, qdev->ndev,
2750 "sbq_desc %d is NULL.\n", i);
2753 if (sbq_desc->p.skb) {
2754 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2757 dev_kfree_skb(sbq_desc->p.skb);
2758 sbq_desc->p.skb = NULL;
2763 /* Free all large and small rx buffers associated
2764 * with the completion queues for this device.
2766 static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2770 for (i = 0; i < qdev->rx_ring_count; i++) {
2771 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2773 if (rx_ring->lbq.queue)
2774 qlge_free_lbq_buffers(qdev, rx_ring);
2775 if (rx_ring->sbq.queue)
2776 qlge_free_sbq_buffers(qdev, rx_ring);
2780 static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2784 for (i = 0; i < qdev->rss_ring_count; i++)
2785 qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2789 static int qlge_init_bq(struct qlge_bq *bq)
2791 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2792 struct qlge_adapter *qdev = rx_ring->qdev;
2793 struct qlge_bq_desc *bq_desc;
2797 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2798 &bq->base_dma, GFP_ATOMIC);
2800 netif_err(qdev, ifup, qdev->ndev,
2801 "ring %u %s allocation failed.\n", rx_ring->cq_id,
2802 bq_type_name[bq->type]);
2806 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2812 bq_desc = &bq->queue[0];
2813 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2814 bq_desc->p.skb = NULL;
2816 bq_desc->buf_ptr = buf_ptr;
2822 static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2823 struct rx_ring *rx_ring)
2825 /* Free the small buffer queue. */
2826 if (rx_ring->sbq.base) {
2827 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2828 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2829 rx_ring->sbq.base = NULL;
2832 /* Free the small buffer queue control blocks. */
2833 kfree(rx_ring->sbq.queue);
2834 rx_ring->sbq.queue = NULL;
2836 /* Free the large buffer queue. */
2837 if (rx_ring->lbq.base) {
2838 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2839 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2840 rx_ring->lbq.base = NULL;
2843 /* Free the large buffer queue control blocks. */
2844 kfree(rx_ring->lbq.queue);
2845 rx_ring->lbq.queue = NULL;
2847 /* Free the rx queue. */
2848 if (rx_ring->cq_base) {
2849 dma_free_coherent(&qdev->pdev->dev,
2851 rx_ring->cq_base, rx_ring->cq_base_dma);
2852 rx_ring->cq_base = NULL;
2856 /* Allocate queues and buffers for this completion queue based
2857 * on the values in the parameter structure.
2859 static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2860 struct rx_ring *rx_ring)
2863 * Allocate the completion queue for this rx_ring.
2866 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2867 &rx_ring->cq_base_dma, GFP_ATOMIC);
2869 if (!rx_ring->cq_base) {
2870 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2874 if (rx_ring->cq_id < qdev->rss_ring_count &&
2875 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2876 qlge_free_rx_resources(qdev, rx_ring);
2883 static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2885 struct tx_ring *tx_ring;
2886 struct tx_ring_desc *tx_ring_desc;
2890 * Loop through all queues and free any resources.
2893 for (j = 0; j < qdev->tx_ring_count; j++) {
2894 tx_ring = &qdev->tx_ring[j];
2895 for (i = 0; i < tx_ring->wq_len; i++) {
2896 tx_ring_desc = &tx_ring->q[i];
2897 if (tx_ring_desc && tx_ring_desc->skb) {
2898 netif_err(qdev, ifdown, qdev->ndev,
2899 "Freeing lost SKB %p, from queue %d, index %d.\n",
2900 tx_ring_desc->skb, j,
2901 tx_ring_desc->index);
2902 qlge_unmap_send(qdev, tx_ring_desc,
2903 tx_ring_desc->map_cnt);
2904 dev_kfree_skb(tx_ring_desc->skb);
2905 tx_ring_desc->skb = NULL;
2911 static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2915 for (i = 0; i < qdev->tx_ring_count; i++)
2916 qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2917 for (i = 0; i < qdev->rx_ring_count; i++)
2918 qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2919 qlge_free_shadow_space(qdev);
2922 static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2926 /* Allocate space for our shadow registers and such. */
2927 if (qlge_alloc_shadow_space(qdev))
2930 for (i = 0; i < qdev->rx_ring_count; i++) {
2931 if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2932 netif_err(qdev, ifup, qdev->ndev,
2933 "RX resource allocation failed.\n");
2937 /* Allocate tx queue resources */
2938 for (i = 0; i < qdev->tx_ring_count; i++) {
2939 if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2940 netif_err(qdev, ifup, qdev->ndev,
2941 "TX resource allocation failed.\n");
2948 qlge_free_mem_resources(qdev);
2952 /* Set up the rx ring control block and pass it to the chip.
2953 * The control block is defined as
2954 * "Completion Queue Initialization Control Block", or cqicb.
2956 static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2958 struct cqicb *cqicb = &rx_ring->cqicb;
2959 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2960 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2961 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2962 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2963 void __iomem *doorbell_area =
2964 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2967 __le64 *base_indirect_ptr;
2970 /* Set up the shadow registers for this ring. */
2971 rx_ring->prod_idx_sh_reg = shadow_reg;
2972 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2973 *rx_ring->prod_idx_sh_reg = 0;
2974 shadow_reg += sizeof(u64);
2975 shadow_reg_dma += sizeof(u64);
2976 rx_ring->lbq.base_indirect = shadow_reg;
2977 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2978 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2979 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2980 rx_ring->sbq.base_indirect = shadow_reg;
2981 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
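/* The base_indirect areas hold per-queue lists of DB_PAGE_SIZE page
 * addresses that the chip walks to locate the buffer queue pages.
 */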
2983 /* PCI doorbell mem area + 0x00 for consumer index register */
2984 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2985 rx_ring->cnsmr_idx = 0;
2986 rx_ring->curr_entry = rx_ring->cq_base;
2988 /* PCI doorbell mem area + 0x04 for valid register */
2989 rx_ring->valid_db_reg = doorbell_area + 0x04;
2991 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2992 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2994 /* PCI doorbell mem area + 0x1c */
2995 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
2997 memset((void *)cqicb, 0, sizeof(struct cqicb));
2998 cqicb->msix_vect = rx_ring->irq;
3000 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3003 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3005 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3008 * Set up the control block load flags.
3010 cqicb->flags = FLAGS_LC | /* Load queue base address */
3011 FLAGS_LV | /* Load MSI-X vector */
3012 FLAGS_LI; /* Load irq delay values */
3013 if (rx_ring->cq_id < qdev->rss_ring_count) {
3014 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3015 tmp = (u64)rx_ring->lbq.base_dma;
3016 base_indirect_ptr = rx_ring->lbq.base_indirect;
3019 *base_indirect_ptr = cpu_to_le64(tmp);
3020 tmp += DB_PAGE_SIZE;
3021 base_indirect_ptr++;
3023 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3024 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3025 cqicb->lbq_buf_size =
3026 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3027 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3028 rx_ring->lbq.next_to_use = 0;
3029 rx_ring->lbq.next_to_clean = 0;
3031 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3032 tmp = (u64)rx_ring->sbq.base_dma;
3033 base_indirect_ptr = rx_ring->sbq.base_indirect;
3036 *base_indirect_ptr = cpu_to_le64(tmp);
3037 tmp += DB_PAGE_SIZE;
3038 base_indirect_ptr++;
3040 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3042 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3043 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3044 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3045 rx_ring->sbq.next_to_use = 0;
3046 rx_ring->sbq.next_to_clean = 0;
3048 if (rx_ring->cq_id < qdev->rss_ring_count) {
3049 /* Inbound completion handling rx_rings run in
3050 * separate NAPI contexts.
3052 netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
3054 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3055 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3057 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3058 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3060 err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3061 CFG_LCQ, rx_ring->cq_id);
3063 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3069 static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3071 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3072 void __iomem *doorbell_area =
3073 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3074 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3075 (tx_ring->wq_id * sizeof(u64));
3076 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3077 (tx_ring->wq_id * sizeof(u64));
3081 * Assign doorbell registers for this tx_ring.
3083 /* TX PCI doorbell mem area for tx producer index */
3084 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3085 tx_ring->prod_idx = 0;
3086 /* TX PCI doorbell mem area + 0x04 */
3087 tx_ring->valid_db_reg = doorbell_area + 0x04;
3090 * Assign shadow registers for this tx_ring.
3092 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3093 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3095 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3096 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3097 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3098 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3100 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3102 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3104 qlge_init_tx_ring(qdev, tx_ring);
3106 err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3107 (u16)tx_ring->wq_id);
3109 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3115 static void qlge_disable_msix(struct qlge_adapter *qdev)
3117 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3118 pci_disable_msix(qdev->pdev);
3119 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3120 kfree(qdev->msi_x_entry);
3121 qdev->msi_x_entry = NULL;
3122 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3123 pci_disable_msi(qdev->pdev);
3124 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3128 /* We start by trying to get the number of vectors
3129 * stored in qdev->intr_count. If we don't get that
3130 * many, then we reduce the count and try again.
3132 static void qlge_enable_msix(struct qlge_adapter *qdev)
3136 /* Get the MSIX vectors. */
3137 if (qlge_irq_type == MSIX_IRQ) {
3138 /* Try to alloc space for the msix struct,
3139 * if it fails then go to MSI/legacy.
3141 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3142 sizeof(struct msix_entry),
3144 if (!qdev->msi_x_entry) {
3145 qlge_irq_type = MSI_IRQ;
3149 for (i = 0; i < qdev->intr_count; i++)
3150 qdev->msi_x_entry[i].entry = i;
3152 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3153 1, qdev->intr_count);
3155 kfree(qdev->msi_x_entry);
3156 qdev->msi_x_entry = NULL;
3157 netif_warn(qdev, ifup, qdev->ndev,
3158 "MSI-X Enable failed, trying MSI.\n");
3159 qlge_irq_type = MSI_IRQ;
3161 qdev->intr_count = err;
3162 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3163 netif_info(qdev, ifup, qdev->ndev,
3164 "MSI-X Enabled, got %d vectors.\n",
3170 qdev->intr_count = 1;
3171 if (qlge_irq_type == MSI_IRQ) {
3172 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3173 set_bit(QL_MSI_ENABLED, &qdev->flags);
3174 netif_info(qdev, ifup, qdev->ndev,
3175 "Running with MSI interrupts.\n");
3179 qlge_irq_type = LEG_IRQ;
3180 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3181 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3182 "Running with legacy interrupts.\n");
3185 /* Each vector services 1 RSS ring and 1 or more
3186 * TX completion rings. This function loops through
3187 * the TX completion rings and assigns the vector that
3188 * will service it. An example would be if there are
3189 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3190 * This would mean that vector 0 would service RSS ring 0
3191 * and TX completion rings 0,1,2 and 3. Vector 1 would
3192 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
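* In general, TX completion ring i (counting from the first TX
* completion ring) is serviced by vector i / tx_rings_per_vector.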
3194 static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3197 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3199 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3200 /* Assign irq vectors to TX rx_rings.*/
3201 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3202 i < qdev->rx_ring_count; i++) {
3203 if (j == tx_rings_per_vector) {
3207 qdev->rx_ring[i].irq = vect;
3211 /* For single vector all rings have an irq id of zero. */
3214 for (i = 0; i < qdev->rx_ring_count; i++)
3215 qdev->rx_ring[i].irq = 0;
3219 /* Set the interrupt mask for this vector. Each vector
3220 * will service 1 RSS ring and 1 or more TX completion
3221 * rings. This function sets up a bit mask per vector
3222 * that indicates which rings it services.
3224 static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3226 int j, vect = ctx->intr;
3227 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3229 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3230 /* Add the RSS ring serviced by this vector to the mask. */
3233 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3234 /* Add the TX ring(s) serviced by this vector to the mask. */
3237 for (j = 0; j < tx_rings_per_vector; j++) {
3239 (1 << qdev->rx_ring[qdev->rss_ring_count +
3240 (vect * tx_rings_per_vector) + j].cq_id);
3243 /* For single vector we just shift each queue's ID onto the mask. */
3246 for (j = 0; j < qdev->rx_ring_count; j++)
3247 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3252 * Here we build the intr_context structures based on
3253 * our rx_ring count and intr vector count.
3254 * The intr_context structure is used to hook each vector
3255 * to possibly different handlers.
3257 static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3260 struct intr_context *intr_context = &qdev->intr_context[0];
3262 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3263 /* Each rx_ring has its
3264 * own intr_context since we have separate
3265 * vectors for each queue.
3267 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3268 qdev->rx_ring[i].irq = i;
3269 intr_context->intr = i;
3270 intr_context->qdev = qdev;
3271 /* Set up this vector's bit-mask that indicates
3272 * which queues it services.
3274 qlge_set_irq_mask(qdev, intr_context);
3276 * We set up each vector's enable/disable/read bits so
3277 * there's no bit/mask calculations in the critical path.
3279 intr_context->intr_en_mask =
3280 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3283 intr_context->intr_dis_mask =
3284 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3285 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3287 intr_context->intr_read_mask =
3288 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3289 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3292 /* The first vector/queue handles
3293 * broadcast/multicast, fatal errors,
3294 * and firmware events. This is in addition
3295 * to normal inbound NAPI processing.
3297 intr_context->handler = qlge_isr;
3298 sprintf(intr_context->name, "%s-rx-%d",
3299 qdev->ndev->name, i);
3302 * Inbound queues handle unicast frames only.
3304 intr_context->handler = qlge_msix_rx_isr;
3305 sprintf(intr_context->name, "%s-rx-%d",
3306 qdev->ndev->name, i);
3311 * All rx_rings use the same intr_context since
3312 * there is only one vector.
3314 intr_context->intr = 0;
3315 intr_context->qdev = qdev;
3317 * We set up each vector's enable/disable/read bits so
3318 * there's no bit/mask calculations in the critical path.
3320 intr_context->intr_en_mask =
3321 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3322 intr_context->intr_dis_mask =
3323 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3324 INTR_EN_TYPE_DISABLE;
3325 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3326 /* Experience shows that when using INTx interrupts,
3327 * the device does not always auto-mask INTR_EN_EN.
3328 * Moreover, masking INTR_EN_EN manually does not
3329 * immediately prevent interrupt generation.
3331 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3333 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3335 intr_context->intr_read_mask =
3336 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3338 * Single interrupt means one handler for all rings.
3340 intr_context->handler = qlge_isr;
3341 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3342 /* Set up this vector's bit-mask that indicates
3343 * which queues it services. In this case there is
3344 * a single vector so it will service all RSS and
3345 * TX completion rings.
3347 qlge_set_irq_mask(qdev, intr_context);
3349 /* Tell the TX completion rings which MSI-X vector
3350 * they will be using.
3352 qlge_set_tx_vect(qdev);
3355 static void qlge_free_irq(struct qlge_adapter *qdev)
3358 struct intr_context *intr_context = &qdev->intr_context[0];
3360 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3361 if (intr_context->hooked) {
3362 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3363 free_irq(qdev->msi_x_entry[i].vector,
3366 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3370 qlge_disable_msix(qdev);
3373 static int qlge_request_irq(struct qlge_adapter *qdev)
3377 struct pci_dev *pdev = qdev->pdev;
3378 struct intr_context *intr_context = &qdev->intr_context[0];
3380 qlge_resolve_queues_to_irqs(qdev);
3382 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3383 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3384 status = request_irq(qdev->msi_x_entry[i].vector,
3385 intr_context->handler,
3390 netif_err(qdev, ifup, qdev->ndev,
3391 "Failed request for MSIX interrupt %d.\n",
3396 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3397 "trying msi or legacy interrupts.\n");
3398 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3399 "%s: irq = %d.\n", __func__, pdev->irq);
3400 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3401 "%s: context->name = %s.\n", __func__,
3402 intr_context->name);
3403 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3404 "%s: dev_id = 0x%p.\n", __func__,
3407 request_irq(pdev->irq, qlge_isr,
3408 test_bit(QL_MSI_ENABLED, &qdev->flags)
3411 intr_context->name, &qdev->rx_ring[0]);
3415 netif_err(qdev, ifup, qdev->ndev,
3416 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3417 intr_context->name);
3419 intr_context->hooked = 1;
3423 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3424 qlge_free_irq(qdev);
3428 static int qlge_start_rss(struct qlge_adapter *qdev)
3430 static const u8 init_hash_seed[] = {
3431 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3432 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3433 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3434 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3435 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3437 struct ricb *ricb = &qdev->ricb;
3440 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3442 memset((void *)ricb, 0, sizeof(*ricb));
3444 ricb->base_cq = RSS_L4K;
3446 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3447 ricb->mask = cpu_to_le16((u16)(0x3ff));
3450 * Fill out the Indirection Table.
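* Entry i maps to ring (i & (rss_ring_count - 1)), which equals
* i % rss_ring_count only when rss_ring_count is a power of two.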
3452 for (i = 0; i < 1024; i++)
3453 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3455 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3456 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3458 status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3460 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3466 static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3470 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3473 /* Clear all the entries in the routing table. */
3474 for (i = 0; i < 16; i++) {
3475 status = qlge_set_routing_reg(qdev, i, 0, 0);
3477 netif_err(qdev, ifup, qdev->ndev,
3478 "Failed to init routing register for CAM packets.\n");
3482 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3486 /* Initialize the frame-to-queue routing. */
3487 static int qlge_route_initialize(struct qlge_adapter *qdev)
3491 /* Clear all the entries in the routing table. */
3492 status = qlge_clear_routing_entries(qdev);
3496 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3500 status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3501 RT_IDX_IP_CSUM_ERR, 1);
3503 netif_err(qdev, ifup, qdev->ndev,
3504 "Failed to init routing register for IP CSUM error packets.\n");
3507 status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3508 RT_IDX_TU_CSUM_ERR, 1);
3510 netif_err(qdev, ifup, qdev->ndev,
3511 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3514 status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3516 netif_err(qdev, ifup, qdev->ndev,
3517 "Failed to init routing register for broadcast packets.\n");
3520 /* If we have more than one inbound queue, then turn on RSS in the routing block. */
3523 if (qdev->rss_ring_count > 1) {
3524 status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3525 RT_IDX_RSS_MATCH, 1);
3527 netif_err(qdev, ifup, qdev->ndev,
3528 "Failed to init routing register for MATCH RSS packets.\n");
3533 status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3536 netif_err(qdev, ifup, qdev->ndev,
3537 "Failed to init routing register for CAM packets.\n");
3539 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3543 int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3547 /* Check if the link is up, and use that to
3548 * determine if we are setting or clearing
3549 * the MAC address in the CAM.
3551 set = qlge_read32(qdev, STS);
3552 set &= qdev->port_link_up;
3553 status = qlge_set_mac_addr(qdev, set);
3555 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3559 status = qlge_route_initialize(qdev);
3561 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3566 static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3573 * Set up the System register to halt on errors.
3575 value = SYS_EFE | SYS_FAE;
3577 qlge_write32(qdev, SYS, mask | value);
3579 /* Set the default queue, and VLAN behavior. */
3580 value = NIC_RCV_CFG_DFQ;
3581 mask = NIC_RCV_CFG_DFQ_MASK;
3582 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3583 value |= NIC_RCV_CFG_RV;
3584 mask |= (NIC_RCV_CFG_RV << 16);
3586 qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3588 /* Set the MPI interrupt to enabled. */
3589 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3591 /* Enable the function, set pagesize, enable error checking. */
3592 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3593 FSC_EC | FSC_VM_PAGE_4K;
3594 value |= SPLT_SETTING;
3596 /* Set/clear header splitting. */
3597 mask = FSC_VM_PAGESIZE_MASK |
3598 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3599 qlge_write32(qdev, FSC, mask | value);
3601 qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3603 /* Set RX packet routing to use port/pci function on which the
3604 * packet arrived, in addition to usual frame routing.
3605 * This is helpful on bonding where both interfaces can have
3606 * the same MAC address.
3608 qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3609 /* Reroute all packets to our interface.
3610 * They may have been routed to MPI firmware due to WOL.
3613 value = qlge_read32(qdev, MGMT_RCV_CFG);
3614 value &= ~MGMT_RCV_CFG_RM;
3617 /* Sticky reg needs clearing due to WOL. */
3618 qlge_write32(qdev, MGMT_RCV_CFG, mask);
3619 qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3621 /* Default WOL is enabled on Mezz cards. */
3622 if (qdev->pdev->subsystem_device == 0x0068 ||
3623 qdev->pdev->subsystem_device == 0x0180)
3624 qdev->wol = WAKE_MAGIC;
3626 /* Start up the rx queues. */
3627 for (i = 0; i < qdev->rx_ring_count; i++) {
3628 status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3630 netif_err(qdev, ifup, qdev->ndev,
3631 "Failed to start rx ring[%d].\n", i);
3636 /* If there is more than one inbound completion queue
3637 * then download a RICB to configure RSS.
3639 if (qdev->rss_ring_count > 1) {
3640 status = qlge_start_rss(qdev);
3642 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3647 /* Start up the tx queues. */
3648 for (i = 0; i < qdev->tx_ring_count; i++) {
3649 status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3651 netif_err(qdev, ifup, qdev->ndev,
3652 "Failed to start tx ring[%d].\n", i);
3657 /* Initialize the port and set the max framesize. */
3658 status = qdev->nic_ops->port_initialize(qdev);
3660 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3662 /* Set up the MAC address and frame routing filter. */
3663 status = qlge_cam_route_initialize(qdev);
3665 netif_err(qdev, ifup, qdev->ndev,
3666 "Failed to init CAM/Routing tables.\n");
3670 /* Start NAPI for the RSS queues. */
3671 for (i = 0; i < qdev->rss_ring_count; i++)
3672 napi_enable(&qdev->rx_ring[i].napi);
3677 /* Issue soft reset to chip. */
3678 static int qlge_adapter_reset(struct qlge_adapter *qdev)
3682 unsigned long end_jiffies;
3684 /* Clear all the entries in the routing table. */
3685 status = qlge_clear_routing_entries(qdev);
3687 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3691 /* If the recovery bit is set, skip the mailbox command and
3692 * clear the bit; otherwise we are in the normal reset process.
3694 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3695 /* Stop management traffic. */
3696 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3698 /* Wait for the NIC and MGMNT FIFOs to empty. */
3699 qlge_wait_fifo_empty(qdev);
3701 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3704 qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3706 end_jiffies = jiffies + usecs_to_jiffies(30);
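/* usecs_to_jiffies() rounds up, so this poll window lasts at least
 * one jiffy even though 30 usecs is far below the tick period.
 */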
3708 value = qlge_read32(qdev, RST_FO);
3709 if ((value & RST_FO_FR) == 0)
3712 } while (time_before(jiffies, end_jiffies));
3714 if (value & RST_FO_FR) {
3715 netif_err(qdev, ifdown, qdev->ndev,
3716 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3717 status = -ETIMEDOUT;
3720 /* Resume management traffic. */
3721 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3725 static void qlge_display_dev_info(struct net_device *ndev)
3727 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3729 netif_info(qdev, probe, qdev->ndev,
3730 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3733 qdev->chip_rev_id & 0x0000000f,
3734 qdev->chip_rev_id >> 4 & 0x0000000f,
3735 qdev->chip_rev_id >> 8 & 0x0000000f,
3736 qdev->chip_rev_id >> 12 & 0x0000000f);
3737 netif_info(qdev, probe, qdev->ndev,
3738 "MAC address %pM\n", ndev->dev_addr);
3741 static int qlge_wol(struct qlge_adapter *qdev)
3744 u32 wol = MB_WOL_DISABLE;
3746 /* The CAM is still intact after a reset, but if we
3747 * are doing WOL, then we may need to program the
3748 * routing regs. We would also need to issue the mailbox
3749 * commands to instruct the MPI what to do per the ethtool settings.
3753 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3754 WAKE_MCAST | WAKE_BCAST)) {
3755 netif_err(qdev, ifdown, qdev->ndev,
3756 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3761 if (qdev->wol & WAKE_MAGIC) {
3762 status = qlge_mb_wol_set_magic(qdev, 1);
3764 netif_err(qdev, ifdown, qdev->ndev,
3765 "Failed to set magic packet on %s.\n",
3769 netif_info(qdev, drv, qdev->ndev,
3770 "Enabled magic packet successfully on %s.\n",
3773 wol |= MB_WOL_MAGIC_PKT;
3777 wol |= MB_WOL_MODE_ON;
3778 status = qlge_mb_wol_mode(qdev, wol);
3779 netif_err(qdev, drv, qdev->ndev,
3780 "WOL %s (wol code 0x%x) on %s\n",
3781 (status == 0) ? "Successfully set" : "Failed",
3782 wol, qdev->ndev->name);
3788 static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3790 /* Don't kill the reset worker thread if we
3791 * are in the process of recovery.
3793 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3794 cancel_delayed_work_sync(&qdev->asic_reset_work);
3795 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3796 cancel_delayed_work_sync(&qdev->mpi_work);
3797 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3798 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3801 static int qlge_adapter_down(struct qlge_adapter *qdev)
3805 qlge_link_off(qdev);
3807 qlge_cancel_all_work_sync(qdev);
3809 for (i = 0; i < qdev->rss_ring_count; i++)
3810 napi_disable(&qdev->rx_ring[i].napi);
3812 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3814 qlge_disable_interrupts(qdev);
3816 qlge_tx_ring_clean(qdev);
3818 /* Call netif_napi_del() from a common point. */
3820 for (i = 0; i < qdev->rss_ring_count; i++)
3821 netif_napi_del(&qdev->rx_ring[i].napi);
3823 status = qlge_adapter_reset(qdev);
3825 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3827 qlge_free_rx_buffers(qdev);
3832 static int qlge_adapter_up(struct qlge_adapter *qdev)
3836 err = qlge_adapter_initialize(qdev);
3838 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3841 set_bit(QL_ADAPTER_UP, &qdev->flags);
3842 qlge_alloc_rx_buffers(qdev);
3843 /* If the port is initialized and the
3844 * link is up, then turn on the carrier.
3846 if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3847 (qlge_read32(qdev, STS) & qdev->port_link_up))
3849 /* Restore rx mode. */
3850 clear_bit(QL_ALLMULTI, &qdev->flags);
3851 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3852 qlge_set_multicast_list(qdev->ndev);
3854 /* Restore vlan setting. */
3855 qlge_restore_vlan(qdev);
3857 qlge_enable_interrupts(qdev);
3858 qlge_enable_all_completion_interrupts(qdev);
3859 netif_tx_start_all_queues(qdev->ndev);
3863 qlge_adapter_reset(qdev);
3867 static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3869 qlge_free_mem_resources(qdev);
3870 qlge_free_irq(qdev);
3873 static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3875 if (qlge_alloc_mem_resources(qdev)) {
3876 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3879 return qlge_request_irq(qdev);
3882 static int qlge_close(struct net_device *ndev)
3884 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3887 /* If we hit the pci_channel_io_perm_failure
3888 * condition, then we already
3889 * brought the adapter down.
3891 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3892 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3893 clear_bit(QL_EEH_FATAL, &qdev->flags);
3898 * Wait for device to recover from a reset.
3899 * (Rarely happens, but possible.)
3901 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3904 /* Make sure refill_work doesn't re-enable napi */
3905 for (i = 0; i < qdev->rss_ring_count; i++)
3906 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3908 qlge_adapter_down(qdev);
3909 qlge_release_adapter_resources(qdev);
3913 static void qlge_set_lb_size(struct qlge_adapter *qdev)
3915 if (qdev->ndev->mtu <= 1500)
3916 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3918 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3919 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3922 static int qlge_configure_rings(struct qlge_adapter *qdev)
3925 struct rx_ring *rx_ring;
3926 struct tx_ring *tx_ring;
3927 int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3929 /* In a perfect world we have one RSS ring for each CPU
3930 * and each has its own vector. To do that we ask for
3931 * cpu_cnt vectors. qlge_enable_msix() will adjust the
3932 * vector count to what we actually get. We then
3933 * allocate an RSS ring for each.
3934 * Essentially, we are doing min(cpu_count, msix_vector_count).
3936 qdev->intr_count = cpu_cnt;
3937 qlge_enable_msix(qdev);
3938 /* Adjust the RSS ring count to the actual vector count. */
3939 qdev->rss_ring_count = qdev->intr_count;
3940 qdev->tx_ring_count = cpu_cnt;
3941 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
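/* Example: with 8 online CPUs and 8 MSI-X vectors granted (assuming
 * MAX_CPUS and the hardware allow it), this gives 8 RSS rings and
 * 8 TX completion rings, i.e. 16 rx_rings total, plus 8 tx_rings.
 */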
3943 for (i = 0; i < qdev->tx_ring_count; i++) {
3944 tx_ring = &qdev->tx_ring[i];
3945 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3946 tx_ring->qdev = qdev;
3948 tx_ring->wq_len = qdev->tx_ring_size;
3950 tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3953 * The completion queue IDs for the tx rings start
3954 * immediately after the rss rings.
3956 tx_ring->cq_id = qdev->rss_ring_count + i;
3959 for (i = 0; i < qdev->rx_ring_count; i++) {
3960 rx_ring = &qdev->rx_ring[i];
3961 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3962 rx_ring->qdev = qdev;
3964 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3965 if (i < qdev->rss_ring_count) {
3967 * Inbound (RSS) queues.
3969 rx_ring->cq_len = qdev->rx_ring_size;
3971 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3972 rx_ring->lbq.type = QLGE_LB;
3973 rx_ring->sbq.type = QLGE_SB;
3974 INIT_DELAYED_WORK(&rx_ring->refill_work,
3978 * Outbound queue handles outbound completions only.
3980 /* The outbound cq is the same size as the tx_ring it services. */
3981 rx_ring->cq_len = qdev->tx_ring_size;
3983 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3989 static int qlge_open(struct net_device *ndev)
3991 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3994 err = qlge_adapter_reset(qdev);
3998 qlge_set_lb_size(qdev);
3999 err = qlge_configure_rings(qdev);
4003 err = qlge_get_adapter_resources(qdev);
4007 err = qlge_adapter_up(qdev);
4014 qlge_release_adapter_resources(qdev);
4018 static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4022 /* Wait for an outstanding reset to complete. */
4023 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4026 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4027 netif_err(qdev, ifup, qdev->ndev,
4028 "Waiting for adapter UP...\n");
4033 netif_err(qdev, ifup, qdev->ndev,
4034 "Timed out waiting for adapter UP\n");
4039 status = qlge_adapter_down(qdev);
4043 qlge_set_lb_size(qdev);
4045 status = qlge_adapter_up(qdev);
4051 netif_alert(qdev, ifup, qdev->ndev,
4052 "Driver up/down cycle failed, closing device.\n");
4053 set_bit(QL_ADAPTER_UP, &qdev->flags);
4054 dev_close(qdev->ndev);
4058 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4060 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4063 if (ndev->mtu == 1500 && new_mtu == 9000)
4064 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4065 else if (ndev->mtu == 9000 && new_mtu == 1500)
4066 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4070 queue_delayed_work(qdev->workqueue,
4071 &qdev->mpi_port_cfg_work, 3 * HZ);
4073 ndev->mtu = new_mtu;
4075 if (!netif_running(qdev->ndev))
4078 status = qlge_change_rx_buffers(qdev);
4080 netif_err(qdev, ifup, qdev->ndev,
4081 "Changing MTU failed.\n");
4087 static struct net_device_stats *qlge_get_stats(struct net_device
4090 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4091 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4092 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4093 unsigned long pkts, mcast, dropped, errors, bytes;
4097 pkts = mcast = dropped = errors = bytes = 0;
4098 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4099 pkts += rx_ring->rx_packets;
4100 bytes += rx_ring->rx_bytes;
4101 dropped += rx_ring->rx_dropped;
4102 errors += rx_ring->rx_errors;
4103 mcast += rx_ring->rx_multicast;
4105 ndev->stats.rx_packets = pkts;
4106 ndev->stats.rx_bytes = bytes;
4107 ndev->stats.rx_dropped = dropped;
4108 ndev->stats.rx_errors = errors;
4109 ndev->stats.multicast = mcast;
4112 pkts = errors = bytes = 0;
4113 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4114 pkts += tx_ring->tx_packets;
4115 bytes += tx_ring->tx_bytes;
4116 errors += tx_ring->tx_errors;
4118 ndev->stats.tx_packets = pkts;
4119 ndev->stats.tx_bytes = bytes;
4120 ndev->stats.tx_errors = errors;
4121 return &ndev->stats;
4124 static void qlge_set_multicast_list(struct net_device *ndev)
4126 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4127 struct netdev_hw_addr *ha;
4130 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4134 * Set or clear promiscuous mode if a
4135 * transition is taking place.
4137 if (ndev->flags & IFF_PROMISC) {
4138 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4139 if (qlge_set_routing_reg
4140 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4141 netif_err(qdev, hw, qdev->ndev,
4142 "Failed to set promiscuous mode.\n");
4144 set_bit(QL_PROMISCUOUS, &qdev->flags);
4148 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4149 if (qlge_set_routing_reg
4150 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4151 netif_err(qdev, hw, qdev->ndev,
4152 "Failed to clear promiscuous mode.\n");
4154 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4160 * Set or clear all multicast mode if a
4161 * transition is taking place.
4163 if ((ndev->flags & IFF_ALLMULTI) ||
4164 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4165 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4166 if (qlge_set_routing_reg
4167 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4168 netif_err(qdev, hw, qdev->ndev,
4169 "Failed to set all-multi mode.\n");
4171 set_bit(QL_ALLMULTI, &qdev->flags);
4175 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4176 if (qlge_set_routing_reg
4177 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4178 netif_err(qdev, hw, qdev->ndev,
4179 "Failed to clear all-multi mode.\n");
4181 clear_bit(QL_ALLMULTI, &qdev->flags);
4186 if (!netdev_mc_empty(ndev)) {
4187 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4191 netdev_for_each_mc_addr(ha, ndev) {
4192 if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4193 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4194 netif_err(qdev, hw, qdev->ndev,
4195 "Failed to loadmulticast address.\n");
4196 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4201 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4202 if (qlge_set_routing_reg
4203 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4204 netif_err(qdev, hw, qdev->ndev,
4205 "Failed to set multicast match mode.\n");
4207 set_bit(QL_ALLMULTI, &qdev->flags);
4211 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4214 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4216 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4217 struct sockaddr *addr = p;
4220 if (!is_valid_ether_addr(addr->sa_data))
4221 return -EADDRNOTAVAIL;
4222 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4223 /* Update local copy of current mac address. */
4224 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4226 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4229 status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4230 MAC_ADDR_TYPE_CAM_MAC,
4231 qdev->func * MAX_CQ);
4233 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4234 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4238 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4240 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4242 qlge_queue_asic_error(qdev);
4245 static void qlge_asic_reset_work(struct work_struct *work)
4247 struct qlge_adapter *qdev =
4248 container_of(work, struct qlge_adapter, asic_reset_work.work);
4252 status = qlge_adapter_down(qdev);
4256 status = qlge_adapter_up(qdev);
4260 /* Restore rx mode. */
4261 clear_bit(QL_ALLMULTI, &qdev->flags);
4262 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4263 qlge_set_multicast_list(qdev->ndev);
4268 netif_alert(qdev, ifup, qdev->ndev,
4269 "Driver up/down cycle failed, closing device\n");
4271 set_bit(QL_ADAPTER_UP, &qdev->flags);
4272 dev_close(qdev->ndev);
4276 static const struct nic_operations qla8012_nic_ops = {
4277 .get_flash = qlge_get_8012_flash_params,
4278 .port_initialize = qlge_8012_port_initialize,
4281 static const struct nic_operations qla8000_nic_ops = {
4282 .get_flash = qlge_get_8000_flash_params,
4283 .port_initialize = qlge_8000_port_initialize,
4286 /* Find the pcie function number for the other NIC
4287 * on this chip. Since both NIC functions share a
4288 * common firmware, we have the lowest enabled function
4289 * do any common work. Examples would be resetting
4290 * after a fatal firmware error, or doing a firmware coredump.
4293 static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4297 u32 nic_func1, nic_func2;
4299 status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4304 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4305 MPI_TEST_NIC_FUNC_MASK);
4306 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4307 MPI_TEST_NIC_FUNC_MASK);
4309 if (qdev->func == nic_func1)
4310 qdev->alt_func = nic_func2;
4311 else if (qdev->func == nic_func2)
4312 qdev->alt_func = nic_func1;
4319 static int qlge_get_board_info(struct qlge_adapter *qdev)
4324 (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4328 status = qlge_get_alt_pcie_func(qdev);
4332 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4334 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4335 qdev->port_link_up = STS_PL1;
4336 qdev->port_init = STS_PI1;
4337 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4338 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4340 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4341 qdev->port_link_up = STS_PL0;
4342 qdev->port_init = STS_PI0;
4343 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4344 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4346 qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4347 qdev->device_id = qdev->pdev->device;
4348 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4349 qdev->nic_ops = &qla8012_nic_ops;
4350 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4351 qdev->nic_ops = &qla8000_nic_ops;
4355 static void qlge_release_all(struct pci_dev *pdev)
4357 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4359 if (qdev->workqueue) {
4360 destroy_workqueue(qdev->workqueue);
4361 qdev->workqueue = NULL;
4365 iounmap(qdev->reg_base);
4366 if (qdev->doorbell_area)
4367 iounmap(qdev->doorbell_area);
4368 vfree(qdev->mpi_coredump);
4369 pci_release_regions(pdev);
static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
			    int cards_found)
{
	struct net_device *ndev = qdev->ndev;
	int err = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->pdev = pdev;
	pci_set_drvdata(pdev, qdev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_disable_pci;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_disable_pci;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_release_pci;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_release_pci;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_iounmap_base;
	}

	err = qlge_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_iounmap_doorbell;
	}

	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct qlge_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_iounmap_doorbell;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}

	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_free_mpi_coredump;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
						  ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_free_mpi_coredump;
	}

	INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_free_mpi_coredump:
	vfree(qdev->mpi_coredump);
err_iounmap_doorbell:
	iounmap(qdev->doorbell_area);
err_iounmap_base:
	iounmap(qdev->reg_base);
err_release_pci:
	pci_release_regions(pdev);
err_disable_pci:
	pci_disable_device(pdev);

	return err;
}
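
/* Standard net_device callbacks. qlge_send() also backs the ethtool
 * loopback self-test via qlge_lb_send() below.
 */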
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
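
/* Deferrable watchdog, rearmed every 5 seconds. The STS read forces a
 * PCI access so EEH can notice a dead bus even when the interface is
 * idle; once the channel is offline the timer is not rearmed.
 */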
static void qlge_timer(struct timer_list *t)
{
	struct qlge_adapter *qdev = from_timer(qdev, t, timer);
	u32 var;

	var = qlge_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

static const struct devlink_ops qlge_devlink_ops;
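
/* Probe entry point: allocate the devlink/adapter pair and the
 * net_device, bring up the hardware, then register with the networking
 * core and devlink.
 */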
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct qlge_netdev_priv *ndev_priv;
	struct qlge_adapter *qdev = NULL;
	struct net_device *ndev = NULL;
	struct devlink *devlink;
	static int cards_found;
	int err = 0;

	devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
	if (!devlink)
		return -ENOMEM;

	qdev = devlink_priv(devlink);

	ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev) {
		err = -ENOMEM;
		goto devlink_free;
	}

	ndev_priv = netdev_priv(ndev);
	ndev_priv->qdev = qdev;
	ndev_priv->ndev = ndev;
	qdev->ndev = ndev;
	err = qlge_init_device(pdev, qdev, cards_found);
	if (err < 0)
		goto netdev_free;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	/* MTU range: this driver only supports 1500 or 9000, so this only
	 * filters out values above or below, and we'll rely on
	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed.
	 */
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		qlge_release_all(pdev);
		pci_disable_device(pdev);
		goto netdev_free;
	}

	err = devlink_register(devlink, &pdev->dev);
	if (err)
		goto netdev_free;

	qlge_health_create_reporters(qdev);

	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	qlge_link_off(qdev);
	qlge_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;

netdev_free:
	free_netdev(ndev);
devlink_free:
	devlink_free(devlink);

	return err;
}
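
/* Hooks for the ethtool loopback self-test; they reuse the normal
 * transmit and receive-clean paths.
 */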
netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return qlge_clean_inbound_rx_ring(rx_ring, budget);
}
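
/* Tear-down mirrors qlge_probe(): stop the watchdog and all pending
 * work, unregister from the networking core, then release the PCI and
 * devlink resources.
 */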
static void qlge_remove(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	struct devlink *devlink = priv_to_devlink(qdev);

	del_timer_sync(&qdev->timer);
	qlge_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	qlge_release_all(pdev);
	pci_disable_device(pdev);
	devlink_health_reporter_destroy(qdev->reporter);
	devlink_unregister(devlink);
	devlink_free(devlink);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void qlge_eeh_close(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int i;

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* The watchdog timer is stopped by our callers; just flush work. */
	qlge_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	qlge_tx_ring_clean(qdev);
	qlge_free_rx_buffers(qdev);
	qlge_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			qlge_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		qlge_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (qlge_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}
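
/* AER/EEH recovery hooks: detect the error, reset the slot, then
 * resume normal operation.
 */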
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
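
/* Power management. Suspend quiesces the adapter and arms wake-on-LAN
 * via qlge_wol(); resume restores bus mastering and brings it back up.
 */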
static int __maybe_unused qlge_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;

	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;
	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = qlge_adapter_down(qdev);
		if (err)
			return err;
	}

	return qlge_wol(qdev);
}

static int __maybe_unused qlge_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;

	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;

	pci_set_master(pdev);
	device_wakeup_disable(dev_d);

	if (netif_running(ndev)) {
		err = qlge_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
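
/* Route shutdown through the suspend path so the adapter is quiesced
 * and wake-on-LAN is configured before power-off.
 */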
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(&pdev->dev);
}

static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);

static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
	.driver.pm = &qlge_pm_ops,
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);