// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
#include "qlge_devlink.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR |
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. Default is OFF - do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int qlge_wol(struct qlge_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int qlge_adapter_down(struct qlge_adapter *);
static int qlge_adapter_up(struct qlge_adapter *);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	qlge_write32(qdev, SEM, sem_bits | sem_mask);
	return !(qlge_read32(qdev, SEM) & sem_bits);
}
int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!qlge_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
{
	qlge_write32(qdev, SEM, sem_mask);
	qlge_read32(qdev, SEM);	/* flush */
}

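/* Usage sketch (illustrative, not part of the driver): callers bracket
 * access to a shared resource between qlge_sem_spinlock() and
 * qlge_sem_unlock(), e.g.
 *
 *	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * qlge_sem_trylock() returns zero when the semaphore was acquired and
 * nonzero when another function or the firmware already owns it.
 */
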
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = qlge_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit) {
			return 0;
		}
		udelay(UDELAY_DELAY);
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
{
	int count;
	u32 temp;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = qlge_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
		   u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
	if (dma_mapping_error(&qdev->pdev->dev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = qlge_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	qlge_write32(qdev, ICB_L, (u32)map);
	qlge_write32(qdev, ICB_H, (u32)(map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	qlge_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = qlge_wait_cfg(qdev, bit);
exit:
	qlge_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	dma_unmap_single(&qdev->pdev->dev, map, size, direction);
	return status;
}

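/* Usage sketch (illustrative): downloading a completion queue control
 * block follows this pattern elsewhere in the driver:
 *
 *	err = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
 *			     rx_ring->cq_id);
 *
 * CFG_LRQ/CFG_LR/CFG_LCQ are chip "load" operations, so the block is
 * mapped DMA_TO_DEVICE; any other bit is treated as a readback and the
 * block is mapped DMA_FROM_DEVICE.
 */
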
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
			  u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC: {
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     MAC_ADDR_ADR | MAC_ADDR_RS |
			     type); /* type */
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			break;
		*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     MAC_ADDR_ADR | MAC_ADDR_RS |
			     type); /* type */
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			break;
		*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						   MAC_ADDR_MW, 0);
			if (status)
				break;
			qlge_write32(qdev, MAC_ADDR_IDX,
				     (offset++) | /* offset */
				     (index
				      << MAC_ADDR_IDX_SHIFT) | /* index */
				     MAC_ADDR_ADR |
				     MAC_ADDR_RS | type); /* type */
			status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						   MAC_ADDR_MR, 0);
			if (status)
				break;
			*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, const u8 *addr,
				 u32 type, u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC: {
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    addr[5];

		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
			     MAC_ADDR_E);
		qlge_write32(qdev, MAC_ADDR_DATA, lower);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
			     MAC_ADDR_E);

		qlge_write32(qdev, MAC_ADDR_DATA, upper);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC: {
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    addr[5];

		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type); /* type */
		qlge_write32(qdev, MAC_ADDR_DATA, lower);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset++) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type); /* type */
		qlge_write32(qdev, MAC_ADDR_DATA, upper);
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     (offset) | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type); /* type */
		/* This field should also include the queue id
		 * and possibly the function id.  Right now we hardcode
		 * the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN: {
		u32 enable_bit = *((u32 *)&addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		qlge_write32(qdev, MAC_ADDR_IDX,
			     offset | /* offset */
			     (index << MAC_ADDR_IDX_SHIFT) | /* index */
			     type | /* type */
			     enable_bit); /* enable/disable */
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = qlge_set_mac_addr_reg(qdev, (const u8 *)addr,
				       MAC_ADDR_TYPE_CAM_MAC,
				       qdev->func * MAX_CQ);
	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void qlge_link_on(struct qlge_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	qlge_set_mac_addr(qdev, 1);
}

void qlge_link_off(struct qlge_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	qlge_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
{
	int status;

	status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	qlge_write32(qdev, RT_IDX,
		     RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = qlge_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
				int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
	{
		value = RT_IDX_DST_CAM_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_VALID: /* Promiscuous Mode frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_IP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
	{
		value = RT_IDX_DST_RSS |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case 0:			/* Clear the E-bit on an entry. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(index << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		qlge_write32(qdev, RT_IDX, value);
		qlge_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

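/* Usage sketch (illustrative): routing initialization enables one slot per
 * frame class, e.g. steering all broadcast frames to the default queue:
 *
 *	status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * and qlge_set_routing_reg(qdev, i, 0, 0) clears the E-bit of a previously
 * enabled entry i.
 */
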
static void qlge_enable_interrupts(struct qlge_adapter *qdev)
{
	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void qlge_disable_interrupts(struct qlge_adapter *qdev)
{
	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
	struct intr_context *ctx = &qdev->intr_context[intr];

	qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
}

static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
{
	struct intr_context *ctx = &qdev->intr_context[intr];

	qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}

static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++)
		qlge_enable_completion_interrupt(qdev, i);
}

static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev,
				   FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev,
				   FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since qlge_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = qlge_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = qlge_validate_flash(qdev,
				     sizeof(struct flash_params_8000) /
				     sizeof(u16),
				     "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	eth_hw_addr_set(qdev->ndev, mac_addr);

exit:
	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = qlge_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = qlge_validate_flash(qdev,
				     sizeof(struct flash_params_8012) /
				     sizeof(u16),
				     "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	eth_hw_addr_set(qdev->ndev, qdev->flash.flash_params_8012.mac_addr);

exit:
	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev,
				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	qlge_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	qlge_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev,
				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = qlge_wait_reg_rdy(qdev,
				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = qlge_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = qlge_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64)lo | ((u64)hi << 32);

exit:
	return status;
}

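/* Usage sketch (illustrative; the register offset is a made-up example,
 * real offsets come from the ethtool statistics table): each 64-bit XGMAC
 * counter is read as a lo dword at 'reg' and a hi dword at 'reg + 4':
 *
 *	u64 frames = 0;
 *
 *	if (qlge_read_xgmac_reg64(qdev, 0x200, &frames))
 *		...read failed, counter unavailable...
 */
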
static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
{
	int status;

	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = qlge_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = qlge_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status)
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
		qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
		qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	qlge_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
	struct qlge_bq_desc *bq_desc;

	bq_desc = &bq->queue[bq->next_to_clean];
	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);

	return bq_desc;
}

static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
						 struct rx_ring *rx_ring)
{
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);

	dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
				qdev->lbq_buf_size, DMA_FROM_DEVICE);

	if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
	    qlge_lbq_block_size(qdev)) {
		/* last chunk of the master page */
		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
			       qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
	}

	return lbq_desc;
}

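/* Worked example of the chunk arithmetic (values are illustrative): with
 * PAGE_SIZE == 4096, lbq_buf_order == 1 and lbq_buf_size == 2048, a master
 * block is qlge_lbq_block_size() = 4096 << 1 = 8192 bytes and is carved
 * into 8192 / 2048 = 4 chunks at offsets 0, 2048, 4096 and 6144.  The
 * chunk at offset 6144 is the last one, so consuming it unmaps the whole
 * block above.
 */
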
/* Update an rx ring index. */
static void qlge_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void qlge_write_cq_idx(struct rx_ring *rx_ring)
{
	qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static const char * const bq_type_name[] = {
	[QLGE_SB] = "sbq",
	[QLGE_LB] = "lbq",
};

/* return 0 or negative error */
static int qlge_refill_sb(struct rx_ring *rx_ring,
			  struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct sk_buff *skb;

	if (sbq_desc->p.skb)
		return 0;

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "ring %u sbq: getting new skb for index %d.\n",
		     rx_ring->cq_id, sbq_desc->index);

	skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, QLGE_SB_PAD);

	sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
					    SMALL_BUF_MAP_SIZE,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}
	*sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);

	sbq_desc->p.skb = skb;
	return 0;
}

/* return 0 or negative error */
static int qlge_refill_lb(struct rx_ring *rx_ring,
			  struct qlge_bq_desc *lbq_desc, gfp_t gfp)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;

	if (!master_chunk->page) {
		struct page *page;
		dma_addr_t dma_addr;

		page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
		if (unlikely(!page))
			return -ENOMEM;
		dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
					qlge_lbq_block_size(qdev),
					DMA_FROM_DEVICE);
		if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
			__free_pages(page, qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -EIO;
		}
		master_chunk->page = page;
		master_chunk->va = page_address(page);
		master_chunk->offset = 0;
		rx_ring->chunk_dma_addr = dma_addr;
	}

	lbq_desc->p.pg_chunk = *master_chunk;
	lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
	*lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
					 lbq_desc->p.pg_chunk.offset);

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	master_chunk->offset += qdev->lbq_buf_size;
	if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
		master_chunk->page = NULL;
	} else {
		master_chunk->va += qdev->lbq_buf_size;
		get_page(master_chunk->page);
	}

	return 0;
}

/* return 0 or negative error */
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
	struct qlge_adapter *qdev = rx_ring->qdev;
	struct qlge_bq_desc *bq_desc;
	int refill_count;
	int retval = 0;
	int i;

	refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
				    bq->next_to_use);
	if (!refill_count)
		return 0;

	i = bq->next_to_use;
	bq_desc = &bq->queue[i];
	i -= QLGE_BQ_LEN;
	do {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "ring %u %s: try cleaning idx %d\n",
			     rx_ring->cq_id, bq_type_name[bq->type], i);

		if (bq->type == QLGE_SB)
			retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
		else
			retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
		if (retval < 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "ring %u %s: Could not get a page chunk, idx %d\n",
				  rx_ring->cq_id, bq_type_name[bq->type], i);
			break;
		}

		bq_desc++;
		i++;
		if (unlikely(!i)) {
			bq_desc = &bq->queue[0];
			i -= QLGE_BQ_LEN;
		}
		refill_count--;
	} while (refill_count);
	i += QLGE_BQ_LEN;

	if (bq->next_to_use != i) {
		if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "ring %u %s: updating prod idx = %d.\n",
				     rx_ring->cq_id, bq_type_name[bq->type],
				     i);
			qlge_write_db_reg(i, bq->prod_idx_db_reg);
		}
		bq->next_to_use = i;
	}

	return retval;
}

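/* Sketch of the index arithmetic above (values are illustrative and assume
 * QLGE_BQ_LEN == 1024): 'i' is biased by -QLGE_BQ_LEN inside the loop so
 * the wrap point can be detected with a cheap '!i' test.  Starting from
 * next_to_use == 1022, 'i' runs -2, -1, 0 (wrap: bq_desc rewinds to
 * &bq->queue[0] and i becomes -1024), -1023, ... and the final
 * 'i += QLGE_BQ_LEN' restores a normal 0..QLGE_BQ_LEN-1 index.
 */
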
static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
				      unsigned long delay)
{
	bool sbq_fail, lbq_fail;

	sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
	lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);

	/* Minimum number of buffers needed to be able to receive at least one
	 * frame of any format:
	 * sbq: 1 for header + 1 for data
	 * lbq: mtu 9000 / lb size
	 * Below this, the queue might stall.
	 */
	if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
	    (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
	     DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
		/* Allocations can take a long time in certain cases (ex.
		 * reclaim). Therefore, use a workqueue for long-running
		 * work items.
		 */
		queue_delayed_work_on(smp_processor_id(), system_long_wq,
				      &rx_ring->refill_work, delay);
}

static void qlge_slow_refill(struct work_struct *work)
{
	struct rx_ring *rx_ring = container_of(work, struct rx_ring,
					       refill_work.work);
	struct napi_struct *napi = &rx_ring->napi;

	napi_disable(napi);
	qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
	napi_enable(napi);

	local_bh_disable();
	/* napi_disable() might have prevented incomplete napi work from being
	 * rescheduled.
	 */
	napi_schedule(napi);
	/* trigger softirq processing */
	local_bh_enable();
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void qlge_unmap_send(struct qlge_adapter *qdev,
			    struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			dma_unmap_single(&qdev->pdev->dev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 DMA_TO_DEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), DMA_TO_DEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int qlge_map_send(struct qlge_adapter *qdev,
			 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
			 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);

	err = dma_mapping_error(&qdev->pdev->dev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
					     sizeof(struct qlge_oal),
					     DMA_TO_DEVICE);
			err = dma_mapping_error(&qdev->pdev->dev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
				cpu_to_le32((sizeof(struct tx_buf_desc) *
					     (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct qlge_oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	qlge_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

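/* Worked example (from the arithmetic above): for an skb with 10
 * fragments, iocb->seg[0] holds skb->data, seg[1..6] hold frag[0..5],
 * seg[7] points at the OAL and frag[6..9] land in oal->seg[0..3].  The
 * OAL descriptor's length is sizeof(struct tx_buf_desc) * (10 - 6) with
 * TX_DESC_C set, i.e. the remaining fragment count times the size of one
 * address descriptor.
 */
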
/* Categorizing receive firmware frame errors */
static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
				   struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;
	default:
		break;
	}
}

/**
 * qlge_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				    void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}

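/* Worked example (illustrative): for an untagged frame *len stays at
 * ETH_HLEN (14).  With a single VLAN tag left in the payload (no hardware
 * stripping) it becomes 14 + VLAN_HLEN = 18, and with stacked (QinQ) tags
 * detected at tags[6]/tags[8] it becomes 14 + 2 * VLAN_HLEN = 22.
 */
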
/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
					 u32 length, u16 vlan_id)
{
	struct sk_buff *skb;
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
				     struct rx_ring *rx_ring,
				     struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				     u32 length, u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length */
	qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	skb_put_data(skb, addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
				    struct rx_ring *rx_ring,
				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
				    u32 length, u16 vlan_id)
{
	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb, *new_skb;

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (!new_skb) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);

	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);

	skb_put_data(new_skb, skb->data, length);

	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		qlge_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void qlge_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	memmove(skb->data, temp_addr, len);
}

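/* Worked example (assuming QLGE_SB_PAD == 32 and NET_IP_ALIGN == 2, the
 * usual values): receive buffers were prepared with skb_reserve(skb, 32),
 * so payload starts at offset 32.  Shifting data and tail back by
 * 32 - 2 = 30 bytes reclaims the extra headroom while keeping the IP
 * header on the customary 2-byte NET_IP_ALIGN boundary.
 */
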
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	struct qlge_bq_desc *lbq_desc, *sbq_desc;
	struct sk_buff *skb = NULL;
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		skb = sbq_desc->p.skb;
		qlge_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			dma_sync_single_for_cpu(&qdev->pdev->dev,
						sbq_desc->dma_addr,
						SMALL_BUF_MAP_SIZE,
						DMA_FROM_DEVICE);
			skb_put_data(skb, sbq_desc->p.skb->data, length);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			skb = sbq_desc->p.skb;
			qlge_realign_skb(skb, length);
			skb_put(skb, length);
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
					 SMALL_BUF_MAP_SIZE,
					 DMA_FROM_DEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (!skb) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       qdev->lbq_buf_size,
				       DMA_FROM_DEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
						lbq_desc->p.pg_chunk.va,
						&hlen);
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to the our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, i = 0;

		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		do {
			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
			size = min(length, qdev->lbq_buf_size);

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		} while (length > 0);
		qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
					&hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
					   struct rx_ring *rx_ring,
					   struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
					   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		qlge_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
					      struct rx_ring *rx_ring,
					      struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					       vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
					vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
					 vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					       vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
				     struct qlge_ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void qlge_queue_fw_error(struct qlge_adapter *qdev)
{
	qlge_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void qlge_queue_asic_error(struct qlge_adapter *qdev)
{
	qlge_link_off(qdev);
	qlge_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
				      struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		qlge_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		qlge_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		qlge_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		qlge_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		qlge_queue_asic_error(qdev);
		break;
	}
}

static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			qlge_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	qlge_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

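/* Worked example of the wake threshold (illustrative): with wq_len == 128,
 * a stopped subqueue is woken once tx_count (the number of free tx
 * entries) climbs back above 128 / 4 = 32, i.e. the ring is at least 25%
 * empty, so the queue is not bounced awake for every single reclaimed
 * descriptor.
 */
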
static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct qlge_adapter *qdev = rx_ring->qdev;
	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct qlge_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			qlge_process_mac_rx_intr(qdev, rx_ring,
						 (struct qlge_ib_mac_iocb_rsp *)
						 net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
						  net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		qlge_update_cq(rx_ring);
		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
	qlge_write_cq_idx(rx_ring);
	return count;
}

2175 static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
2177 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2178 struct qlge_adapter *qdev = rx_ring->qdev;
2179 struct rx_ring *trx_ring;
2180 int i, work_done = 0;
2181 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2184 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2186 /* Service the TX rings first. They start
2187 * right after the RSS rings.
2189 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2190 trx_ring = &qdev->rx_ring[i];
2191 /* If this TX completion ring belongs to this vector and
2192 * it's not empty then service it.
2194 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2195 (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2196 trx_ring->cnsmr_idx)) {
2197 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2198 "%s: Servicing TX completion ring %d.\n",
2199 __func__, trx_ring->cq_id);
2200 qlge_clean_outbound_rx_ring(trx_ring);
2205 * Now service the RSS ring if it's active.
2207 if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2208 rx_ring->cnsmr_idx) {
2209 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2210 "%s: Servicing RX completion ring %d.\n",
2211 __func__, rx_ring->cq_id);
2212 work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
2215 if (work_done < budget) {
2216 napi_complete_done(napi, work_done);
2217 qlge_enable_completion_interrupt(qdev, rx_ring->irq);
2218 }
2220 return work_done;
2221 }
2222 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2224 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2226 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2227 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2228 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2229 } else {
2230 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2231 }
2232 }
2234 /*
2235 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2236 * based on the features to enable/disable hardware vlan accel
2238 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2239 netdev_features_t features)
2241 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2242 bool need_restart = netif_running(ndev);
2243 int status = 0;
2245 if (need_restart) {
2246 status = qlge_adapter_down(qdev);
2247 if (status) {
2248 netif_err(qdev, link, qdev->ndev,
2249 "Failed to bring down the adapter\n");
2250 return status;
2251 }
2252 }
2254 /* Update the features with the recent change. */
2255 ndev->features = features;
2257 if (need_restart) {
2258 status = qlge_adapter_up(qdev);
2259 if (status) {
2260 netif_err(qdev, link, qdev->ndev,
2261 "Failed to bring up the adapter\n");
2262 return status;
2263 }
2264 }
2266 return status;
2267 }
2269 static int qlge_set_features(struct net_device *ndev,
2270 netdev_features_t features)
2272 netdev_features_t changed = ndev->features ^ features;
2273 int err;
2275 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2276 /* Update the behavior of vlan accel in the adapter */
2277 err = qlge_update_hw_vlan_features(ndev, features);
2278 if (err)
2279 return err;
2281 qlge_vlan_mode(ndev, features);
2282 }
2284 return 0;
2285 }
2287 static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2288 {
2289 u32 enable_bit = MAC_ADDR_E;
2290 int err;
2292 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2293 MAC_ADDR_TYPE_VLAN, vid);
2294 if (err)
2295 netif_err(qdev, ifup, qdev->ndev,
2296 "Failed to init vlan address.\n");
2297 return err;
2298 }
2300 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2301 {
2302 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2303 int status;
2304 int err;
2306 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2307 if (status)
2308 return status;
2310 err = __qlge_vlan_rx_add_vid(qdev, vid);
2311 set_bit(vid, qdev->active_vlans);
2313 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2315 return err;
2316 }
2318 static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2319 {
2320 u32 enable_bit = 0;
2321 int err;
2323 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2324 MAC_ADDR_TYPE_VLAN, vid);
2325 if (err)
2326 netif_err(qdev, ifup, qdev->ndev,
2327 "Failed to clear vlan address.\n");
2328 return err;
2329 }
2331 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2332 {
2333 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2334 int status;
2335 int err;
2337 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2338 if (status)
2339 return status;
2341 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2342 clear_bit(vid, qdev->active_vlans);
2344 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2346 return err;
2347 }
2349 static void qlge_restore_vlan(struct qlge_adapter *qdev)
2350 {
2351 int status;
2352 u16 vid;
2354 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2355 if (status)
2356 return;
2358 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2359 __qlge_vlan_rx_add_vid(qdev, vid);
2361 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2362 }
2364 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2365 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2367 struct rx_ring *rx_ring = dev_id;
2369 napi_schedule(&rx_ring->napi);
2370 return IRQ_HANDLED;
2371 }
2373 /* This handles a fatal error, MPI activity, and the default
2374 * rx_ring in an MSI-X multiple vector environment.
2375 * In an MSI/legacy environment it also processes the rest of
2376 * the rx rings.
2377 */
2378 static irqreturn_t qlge_isr(int irq, void *dev_id)
2380 struct rx_ring *rx_ring = dev_id;
2381 struct qlge_adapter *qdev = rx_ring->qdev;
2382 struct intr_context *intr_context = &qdev->intr_context[0];
2383 u32 var;
2384 int work_done = 0;
2386 /* Experience shows that when using INTx interrupts, interrupts must
2387 * be masked manually.
2388 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2389 * (even though it is auto-masked), otherwise a later command to
2390 * enable it is not effective.
2392 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2393 qlge_disable_completion_interrupt(qdev, 0);
2395 var = qlge_read32(qdev, STS);
2397 /*
2398 * Check for fatal error.
2399 */
2400 if (var & STS_FE) {
2401 qlge_disable_completion_interrupt(qdev, 0);
2402 qlge_queue_asic_error(qdev);
2403 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2404 var = qlge_read32(qdev, ERR_STS);
2405 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2406 return IRQ_HANDLED;
2407 }
2409 /*
2410 * Check MPI processor activity.
2411 */
2412 if ((var & STS_PI) &&
2413 (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2415 * We've got an async event or mailbox completion.
2416 * Handle it and clear the source of the interrupt.
2418 netif_err(qdev, intr, qdev->ndev,
2419 "Got MPI processor interrupt.\n");
2420 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2421 queue_delayed_work_on(smp_processor_id(),
2422 qdev->workqueue, &qdev->mpi_work, 0);
2423 work_done++;
2424 }
2427 * Get the bit-mask that shows the active queues for this
2428 * pass. Compare it to the queues that this irq services
2429 * and call napi if there's a match.
2431 var = qlge_read32(qdev, ISR1);
2432 if (var & intr_context->irq_mask) {
2433 netif_info(qdev, intr, qdev->ndev,
2434 "Waking handler for rx_ring[0].\n");
2435 napi_schedule(&rx_ring->napi);
2436 work_done++;
2437 }
2438 /* Experience shows that the device sometimes signals an
2439 * interrupt but no work is scheduled from this function.
2440 * Nevertheless, the interrupt is auto-masked. Therefore, we
2441 * systematically re-enable the interrupt if we didn't
2442 * schedule napi. */
2443 if (!work_done)
2444 qlge_enable_completion_interrupt(qdev, 0);
2447 return work_done ? IRQ_HANDLED : IRQ_NONE;
2448 }
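/* Returning IRQ_NONE when no work was found lets the kernel's
 * spurious-interrupt accounting know the event was not ours, which
 * matters when a legacy INTx line is shared with other devices.
 */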
2450 static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2452 if (skb_is_gso(skb)) {
2453 int err;
2454 __be16 l3_proto = vlan_get_protocol(skb);
2456 err = skb_cow_head(skb, 0);
2457 if (err < 0)
2458 return err;
2460 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2461 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2462 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2463 mac_iocb_ptr->total_hdrs_len =
2464 cpu_to_le16(skb_tcp_all_headers(skb));
2465 mac_iocb_ptr->net_trans_offset =
2466 cpu_to_le16(skb_network_offset(skb) |
2467 skb_transport_offset(skb)
2468 << OB_MAC_TRANSPORT_HDR_SHIFT);
2469 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2470 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2471 if (likely(l3_proto == htons(ETH_P_IP))) {
2472 struct iphdr *iph = ip_hdr(skb);
2474 iph->check = 0;
2475 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2476 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2477 iph->daddr, 0,
2478 IPPROTO_TCP,
2479 0);
2480 } else if (l3_proto == htons(ETH_P_IPV6)) {
2481 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2482 tcp_hdr(skb)->check =
2483 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2484 &ipv6_hdr(skb)->daddr,
2485 0, IPPROTO_TCP, 0);
2486 }
2487 return 1;
2488 }
2489 return 0;
2490 }
2492 static void qlge_hw_csum_setup(struct sk_buff *skb,
2493 struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2494 {
2495 int len;
2496 struct iphdr *iph = ip_hdr(skb);
2497 __sum16 *check;
2499 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2500 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2501 mac_iocb_ptr->net_trans_offset =
2502 cpu_to_le16(skb_network_offset(skb) |
2503 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2505 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2506 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2507 if (likely(iph->protocol == IPPROTO_TCP)) {
2508 check = &(tcp_hdr(skb)->check);
2509 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2510 mac_iocb_ptr->total_hdrs_len =
2511 cpu_to_le16(skb_transport_offset(skb) +
2512 (tcp_hdr(skb)->doff << 2));
2513 } else {
2514 check = &(udp_hdr(skb)->check);
2515 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2516 mac_iocb_ptr->total_hdrs_len =
2517 cpu_to_le16(skb_transport_offset(skb) +
2518 sizeof(struct udphdr));
2519 }
2520 *check = ~csum_tcpudp_magic(iph->saddr,
2521 iph->daddr, len, iph->protocol, 0);
2522 }
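/* The assignment above follows the usual checksum-offload handshake:
 * software seeds the TCP/UDP checksum field with the complemented
 * pseudo-header sum (addresses, length, protocol), and the hardware
 * folds the one's-complement sum of the payload into it on transmit.
 */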
2524 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2526 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2527 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2528 struct tx_ring_desc *tx_ring_desc;
2529 int tso;
2530 struct tx_ring *tx_ring;
2531 u32 tx_ring_idx = (u32)skb->queue_mapping;
2533 tx_ring = &qdev->tx_ring[tx_ring_idx];
2535 if (skb_padto(skb, ETH_ZLEN))
2536 return NETDEV_TX_OK;
2538 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2539 netif_info(qdev, tx_queued, qdev->ndev,
2540 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2541 __func__, tx_ring_idx);
2542 netif_stop_subqueue(ndev, tx_ring->wq_id);
2543 tx_ring->tx_errors++;
2544 return NETDEV_TX_BUSY;
2546 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2547 mac_iocb_ptr = tx_ring_desc->queue_entry;
2548 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2550 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2551 mac_iocb_ptr->tid = tx_ring_desc->index;
2552 /* We use the upper 32-bits to store the tx queue for this IO.
2553 * When we get the completion we can use it to establish the context.
2555 mac_iocb_ptr->txq_idx = tx_ring_idx;
2556 tx_ring_desc->skb = skb;
2558 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2560 if (skb_vlan_tag_present(skb)) {
2561 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2562 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2563 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2564 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2566 tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2567 if (tso < 0) {
2568 dev_kfree_skb_any(skb);
2569 return NETDEV_TX_OK;
2570 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2571 qlge_hw_csum_setup(skb,
2572 (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2574 if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2575 NETDEV_TX_OK) {
2576 netif_err(qdev, tx_queued, qdev->ndev,
2577 "Could not map the segments.\n");
2578 tx_ring->tx_errors++;
2579 return NETDEV_TX_BUSY;
2580 }
2582 tx_ring->prod_idx++;
2583 if (tx_ring->prod_idx == tx_ring->wq_len)
2584 tx_ring->prod_idx = 0;
2585 wmb();
2587 qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2588 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589 "tx queued, slot %d, len %d\n",
2590 tx_ring->prod_idx, skb->len);
2592 atomic_dec(&tx_ring->tx_count);
2594 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595 netif_stop_subqueue(ndev, tx_ring->wq_id);
2596 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2598 * The queue got stopped because the tx_ring was full.
2599 * Wake it up, because it's now at least 25% empty.
2601 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2602 }
2603 return NETDEV_TX_OK;
2604 }
2606 static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2608 if (qdev->rx_ring_shadow_reg_area) {
2609 dma_free_coherent(&qdev->pdev->dev,
2611 qdev->rx_ring_shadow_reg_area,
2612 qdev->rx_ring_shadow_reg_dma);
2613 qdev->rx_ring_shadow_reg_area = NULL;
2615 if (qdev->tx_ring_shadow_reg_area) {
2616 dma_free_coherent(&qdev->pdev->dev,
2618 qdev->tx_ring_shadow_reg_area,
2619 qdev->tx_ring_shadow_reg_dma);
2620 qdev->tx_ring_shadow_reg_area = NULL;
2624 static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2626 qdev->rx_ring_shadow_reg_area =
2627 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2628 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2629 if (!qdev->rx_ring_shadow_reg_area) {
2630 netif_err(qdev, ifup, qdev->ndev,
2631 "Allocation of RX shadow space failed.\n");
2635 qdev->tx_ring_shadow_reg_area =
2636 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2637 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2638 if (!qdev->tx_ring_shadow_reg_area) {
2639 netif_err(qdev, ifup, qdev->ndev,
2640 "Allocation of TX shadow space failed.\n");
2641 goto err_wqp_sh_area;
2646 dma_free_coherent(&qdev->pdev->dev,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
2653 static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2655 struct tx_ring_desc *tx_ring_desc;
2657 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2659 mac_iocb_ptr = tx_ring->wq_base;
2660 tx_ring_desc = tx_ring->q;
2661 for (i = 0; i < tx_ring->wq_len; i++) {
2662 tx_ring_desc->index = i;
2663 tx_ring_desc->skb = NULL;
2664 tx_ring_desc->queue_entry = mac_iocb_ptr;
2665 mac_iocb_ptr++;
2666 tx_ring_desc++;
2667 }
2668 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2669 }
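/* Note that tx_count tracks free slots in the work queue rather than
 * in-flight sends: it starts at wq_len here, is decremented for each
 * IOCB queued by qlge_send(), and is incremented again as TX
 * completions are reaped.
 */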
2671 static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2672 struct tx_ring *tx_ring)
2674 if (tx_ring->wq_base) {
2675 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2676 tx_ring->wq_base, tx_ring->wq_base_dma);
2677 tx_ring->wq_base = NULL;
2683 static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2684 struct tx_ring *tx_ring)
2686 tx_ring->wq_base =
2687 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2688 &tx_ring->wq_base_dma, GFP_ATOMIC);
2690 if (!tx_ring->wq_base ||
2691 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2692 goto pci_alloc_err;
2694 tx_ring->q =
2695 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2696 GFP_KERNEL);
2697 if (!tx_ring->q)
2698 goto err;
2700 return 0;
2701 err:
2702 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2703 tx_ring->wq_base, tx_ring->wq_base_dma);
2704 tx_ring->wq_base = NULL;
2705 pci_alloc_err:
2706 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2707 return -ENOMEM;
2708 }
2710 static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2712 struct qlge_bq *lbq = &rx_ring->lbq;
2713 unsigned int last_offset;
2715 last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
2716 while (lbq->next_to_clean != lbq->next_to_use) {
2717 struct qlge_bq_desc *lbq_desc =
2718 &lbq->queue[lbq->next_to_clean];
2720 if (lbq_desc->p.pg_chunk.offset == last_offset)
2721 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2722 qlge_lbq_block_size(qdev),
2723 DMA_FROM_DEVICE);
2724 put_page(lbq_desc->p.pg_chunk.page);
2726 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2729 if (rx_ring->master_chunk.page) {
2730 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2731 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2732 put_page(rx_ring->master_chunk.page);
2733 rx_ring->master_chunk.page = NULL;
2734 }
2735 }
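/* Large buffers are carved out of a shared higher-order "master chunk"
 * page, so the chunk is only DMA-unmapped when the descriptor holding
 * its last offset is cleaned; every descriptor still drops its own
 * page reference via put_page().
 */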
2737 static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2741 for (i = 0; i < QLGE_BQ_LEN; i++) {
2742 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2744 if (!sbq_desc) {
2745 netif_err(qdev, ifup, qdev->ndev,
2746 "sbq_desc %d is NULL.\n", i);
2747 return;
2748 }
2749 if (sbq_desc->p.skb) {
2750 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2751 SMALL_BUF_MAP_SIZE,
2752 DMA_FROM_DEVICE);
2753 dev_kfree_skb(sbq_desc->p.skb);
2754 sbq_desc->p.skb = NULL;
2755 }
2756 }
2757 }
2759 /* Free all large and small rx buffers associated
2760 * with the completion queues for this device.
2762 static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2766 for (i = 0; i < qdev->rx_ring_count; i++) {
2767 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2769 if (rx_ring->lbq.queue)
2770 qlge_free_lbq_buffers(qdev, rx_ring);
2771 if (rx_ring->sbq.queue)
2772 qlge_free_sbq_buffers(qdev, rx_ring);
2776 static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2780 for (i = 0; i < qdev->rss_ring_count; i++)
2781 qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2785 static int qlge_init_bq(struct qlge_bq *bq)
2787 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2788 struct qlge_adapter *qdev = rx_ring->qdev;
2789 struct qlge_bq_desc *bq_desc;
2790 __le64 *buf_ptr;
2791 int i;
2793 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2794 &bq->base_dma, GFP_ATOMIC);
2795 if (!bq->base)
2796 return -ENOMEM;
2798 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2799 GFP_KERNEL);
2800 if (!bq->queue)
2801 return -ENOMEM;
2803 buf_ptr = bq->base;
2804 bq_desc = &bq->queue[0];
2805 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2806 bq_desc->p.skb = NULL;
2807 bq_desc->index = i;
2808 bq_desc->buf_ptr = buf_ptr;
2809 }
2811 return 0;
2812 }
2814 static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2815 struct rx_ring *rx_ring)
2817 /* Free the small buffer queue. */
2818 if (rx_ring->sbq.base) {
2819 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2820 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2821 rx_ring->sbq.base = NULL;
2824 /* Free the small buffer queue control blocks. */
2825 kfree(rx_ring->sbq.queue);
2826 rx_ring->sbq.queue = NULL;
2828 /* Free the large buffer queue. */
2829 if (rx_ring->lbq.base) {
2830 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2831 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2832 rx_ring->lbq.base = NULL;
2835 /* Free the large buffer queue control blocks. */
2836 kfree(rx_ring->lbq.queue);
2837 rx_ring->lbq.queue = NULL;
2839 /* Free the rx queue. */
2840 if (rx_ring->cq_base) {
2841 dma_free_coherent(&qdev->pdev->dev,
2843 rx_ring->cq_base, rx_ring->cq_base_dma);
2844 rx_ring->cq_base = NULL;
2848 /* Allocate queues and buffers for this completions queue based
2849 * on the values in the parameter structure.
2851 static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2852 struct rx_ring *rx_ring)
2855 * Allocate the completion queue for this rx_ring.
2858 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2859 &rx_ring->cq_base_dma, GFP_ATOMIC);
2861 if (!rx_ring->cq_base) {
2862 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2866 if (rx_ring->cq_id < qdev->rss_ring_count &&
2867 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2868 qlge_free_rx_resources(qdev, rx_ring);
2875 static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2877 struct tx_ring *tx_ring;
2878 struct tx_ring_desc *tx_ring_desc;
2882 * Loop through all queues and free
2885 for (j = 0; j < qdev->tx_ring_count; j++) {
2886 tx_ring = &qdev->tx_ring[j];
2887 for (i = 0; i < tx_ring->wq_len; i++) {
2888 tx_ring_desc = &tx_ring->q[i];
2889 if (tx_ring_desc && tx_ring_desc->skb) {
2890 netif_err(qdev, ifdown, qdev->ndev,
2891 "Freeing lost SKB %p, from queue %d, index %d.\n",
2892 tx_ring_desc->skb, j,
2893 tx_ring_desc->index);
2894 qlge_unmap_send(qdev, tx_ring_desc,
2895 tx_ring_desc->map_cnt);
2896 dev_kfree_skb(tx_ring_desc->skb);
2897 tx_ring_desc->skb = NULL;
2903 static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2907 for (i = 0; i < qdev->tx_ring_count; i++)
2908 qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2909 for (i = 0; i < qdev->rx_ring_count; i++)
2910 qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2911 qlge_free_shadow_space(qdev);
2914 static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2918 /* Allocate space for our shadow registers and such. */
2919 if (qlge_alloc_shadow_space(qdev))
2922 for (i = 0; i < qdev->rx_ring_count; i++) {
2923 if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2924 netif_err(qdev, ifup, qdev->ndev,
2925 "RX resource allocation failed.\n");
2929 /* Allocate tx queue resources */
2930 for (i = 0; i < qdev->tx_ring_count; i++) {
2931 if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2932 netif_err(qdev, ifup, qdev->ndev,
2933 "TX resource allocation failed.\n");
2940 qlge_free_mem_resources(qdev);
2944 /* Set up the rx ring control block and pass it to the chip.
2945 * The control block is defined as
2946 * "Completion Queue Initialization Control Block", or cqicb.
2948 static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2950 struct cqicb *cqicb = &rx_ring->cqicb;
2951 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2952 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2953 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2954 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2955 void __iomem *doorbell_area =
2956 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2959 __le64 *base_indirect_ptr;
2962 /* Set up the shadow registers for this ring. */
2963 rx_ring->prod_idx_sh_reg = shadow_reg;
2964 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2965 *rx_ring->prod_idx_sh_reg = 0;
2966 shadow_reg += sizeof(u64);
2967 shadow_reg_dma += sizeof(u64);
2968 rx_ring->lbq.base_indirect = shadow_reg;
2969 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2970 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2971 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2972 rx_ring->sbq.base_indirect = shadow_reg;
2973 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2975 /* PCI doorbell mem area + 0x00 for consumer index register */
2976 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2977 rx_ring->cnsmr_idx = 0;
2978 rx_ring->curr_entry = rx_ring->cq_base;
2980 /* PCI doorbell mem area + 0x04 for valid register */
2981 rx_ring->valid_db_reg = doorbell_area + 0x04;
2983 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2984 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2986 /* PCI doorbell mem area + 0x1c */
2987 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
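/* Summary of the per-CQ doorbell page laid out above: +0x00 holds the
 * consumer index, +0x04 the valid register, +0x18 the large buffer
 * queue producer index, and +0x1c the small buffer queue producer
 * index.
 */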
2989 memset((void *)cqicb, 0, sizeof(struct cqicb));
2990 cqicb->msix_vect = rx_ring->irq;
2992 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
2993 LEN_CPP_CONT);
2995 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2997 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3000 * Set up the control block load flags.
3002 cqicb->flags = FLAGS_LC | /* Load queue base address */
3003 FLAGS_LV | /* Load MSI-X vector */
3004 FLAGS_LI; /* Load irq delay values */
3005 if (rx_ring->cq_id < qdev->rss_ring_count) {
3006 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3007 dma = (u64)rx_ring->lbq.base_dma;
3008 base_indirect_ptr = rx_ring->lbq.base_indirect;
3010 for (page_entries = 0;
3011 page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
3013 base_indirect_ptr[page_entries] = cpu_to_le64(dma);
3014 dma += DB_PAGE_SIZE;
3016 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3017 cqicb->lbq_buf_size =
3018 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3019 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3020 rx_ring->lbq.next_to_use = 0;
3021 rx_ring->lbq.next_to_clean = 0;
3023 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3024 dma = (u64)rx_ring->sbq.base_dma;
3025 base_indirect_ptr = rx_ring->sbq.base_indirect;
3027 for (page_entries = 0;
3028 page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
3030 base_indirect_ptr[page_entries] = cpu_to_le64(dma);
3031 dma += DB_PAGE_SIZE;
3032 }
3033 cqicb->sbq_addr =
3034 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3035 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3036 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3037 rx_ring->sbq.next_to_use = 0;
3038 rx_ring->sbq.next_to_clean = 0;
3040 if (rx_ring->cq_id < qdev->rss_ring_count) {
3041 /* Inbound completion handling rx_rings run in
3042 * separate NAPI contexts.
3044 netif_napi_add_weight(qdev->ndev, &rx_ring->napi,
3045 qlge_napi_poll_msix, 64);
3046 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3047 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3049 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3050 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3052 err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3053 CFG_LCQ, rx_ring->cq_id);
3055 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3061 static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3063 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3064 void __iomem *doorbell_area =
3065 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3066 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3067 (tx_ring->wq_id * sizeof(u64));
3068 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3069 (tx_ring->wq_id * sizeof(u64));
3073 * Assign doorbell registers for this tx_ring.
3075 /* TX PCI doorbell mem area for tx producer index */
3076 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3077 tx_ring->prod_idx = 0;
3078 /* TX PCI doorbell mem area + 0x04 */
3079 tx_ring->valid_db_reg = doorbell_area + 0x04;
3082 * Assign shadow registers for this tx_ring.
3084 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3085 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3087 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3088 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3089 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3090 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3092 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3094 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3096 qlge_init_tx_ring(qdev, tx_ring);
3098 err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3099 (u16)tx_ring->wq_id);
3101 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3107 static void qlge_disable_msix(struct qlge_adapter *qdev)
3109 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3110 pci_disable_msix(qdev->pdev);
3111 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3112 kfree(qdev->msi_x_entry);
3113 qdev->msi_x_entry = NULL;
3114 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3115 pci_disable_msi(qdev->pdev);
3116 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3120 /* We start by trying to get the number of vectors
3121 * stored in qdev->intr_count. If we don't get that
3122 * many then we reduce the count and try again.
3124 static void qlge_enable_msix(struct qlge_adapter *qdev)
3128 /* Get the MSIX vectors. */
3129 if (qlge_irq_type == MSIX_IRQ) {
3130 /* Try to alloc space for the msix struct,
3131 * if it fails then go to MSI/legacy.
3133 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3134 sizeof(struct msix_entry),
3136 if (!qdev->msi_x_entry) {
3137 qlge_irq_type = MSI_IRQ;
3138 goto msi;
3139 }
3141 for (i = 0; i < qdev->intr_count; i++)
3142 qdev->msi_x_entry[i].entry = i;
3144 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3145 1, qdev->intr_count);
3146 if (err < 0) {
3147 kfree(qdev->msi_x_entry);
3148 qdev->msi_x_entry = NULL;
3149 netif_warn(qdev, ifup, qdev->ndev,
3150 "MSI-X Enable failed, trying MSI.\n");
3151 qlge_irq_type = MSI_IRQ;
3152 } else {
3153 qdev->intr_count = err;
3154 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3155 netif_info(qdev, ifup, qdev->ndev,
3156 "MSI-X Enabled, got %d vectors.\n",
3157 qdev->intr_count);
3158 return;
3159 }
3160 }
3161 msi:
3162 qdev->intr_count = 1;
3163 if (qlge_irq_type == MSI_IRQ) {
3164 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3165 set_bit(QL_MSI_ENABLED, &qdev->flags);
3166 netif_info(qdev, ifup, qdev->ndev,
3167 "Running with MSI interrupts.\n");
3168 return;
3169 }
3170 }
3171 qlge_irq_type = LEG_IRQ;
3172 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3173 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174 "Running with legacy interrupts.\n");
3177 /* Each vector services 1 RSS ring and 1 or more
3178 * TX completion rings. This function loops through
3179 * the TX completion rings and assigns the vector that
3180 * will service it. An example would be if there are
3181 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3182 * This would mean that vector 0 would service RSS ring 0
3183 * and TX completion rings 0,1,2 and 3. Vector 1 would
3184 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3186 static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3189 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3191 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3192 /* Assign irq vectors to TX rx_rings.*/
3193 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3194 i < qdev->rx_ring_count; i++) {
3195 if (j == tx_rings_per_vector) {
3196 vect++;
3197 j = 0;
3198 }
3199 qdev->rx_ring[i].irq = vect;
3200 j++;
3201 }
3202 } else {
3203 /* For single vector all rings have an irq
3206 for (i = 0; i < qdev->rx_ring_count; i++)
3207 qdev->rx_ring[i].irq = 0;
3208 }
3209 }
3211 /* Set the interrupt mask for this vector. Each vector
3212 * will service 1 RSS ring and 1 or more TX completion
3213 * rings. This function sets up a bit mask per vector
3214 * that indicates which rings it services.
3215 */
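/* Worked example (hypothetical layout): with 2 vectors and 8 TX
 * completion rings, vector 0 owns RSS ring 0 (cq_id 0) plus the TX
 * completion rings with cq_ids 2..5, so its mask would be
 * (1 << 0) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) = 0x3d.
 */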
3216 static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3218 int j, vect = ctx->intr;
3219 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3221 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3222 /* Add the RSS ring serviced by this vector
3225 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3226 /* Add the TX ring(s) serviced by this vector
3229 for (j = 0; j < tx_rings_per_vector; j++) {
3230 ctx->irq_mask |=
3231 (1 << qdev->rx_ring[qdev->rss_ring_count +
3232 (vect * tx_rings_per_vector) + j].cq_id);
3233 }
3234 } else {
3235 /* For single vector we just shift each queue's
3238 for (j = 0; j < qdev->rx_ring_count; j++)
3239 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3244 * Here we build the intr_context structures based on
3245 * our rx_ring count and intr vector count.
3246 * The intr_context structure is used to hook each vector
3247 * to possibly different handlers.
3249 static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3252 struct intr_context *intr_context = &qdev->intr_context[0];
3254 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3255 /* Each rx_ring has its
3256 * own intr_context since we have separate
3257 * vectors for each queue.
3259 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3260 qdev->rx_ring[i].irq = i;
3261 intr_context->intr = i;
3262 intr_context->qdev = qdev;
3263 /* Set up this vector's bit-mask that indicates
3264 * which queues it services.
3266 qlge_set_irq_mask(qdev, intr_context);
3267 /*
3268 * We set up each vector's enable/disable/read bits so
3269 * there are no bit/mask calculations in the critical path.
3270 */
3271 intr_context->intr_en_mask =
3272 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3273 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3275 intr_context->intr_dis_mask =
3276 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3277 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3279 intr_context->intr_read_mask =
3280 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3284 /* The first vector/queue handles
3285 * broadcast/multicast, fatal errors,
3286 * and firmware events. This in addition
3287 * to normal inbound NAPI processing.
3289 intr_context->handler = qlge_isr;
3290 sprintf(intr_context->name, "%s-rx-%d",
3291 qdev->ndev->name, i);
3294 * Inbound queues handle unicast frames only.
3296 intr_context->handler = qlge_msix_rx_isr;
3297 sprintf(intr_context->name, "%s-rx-%d",
3298 qdev->ndev->name, i);
3303 * All rx_rings use the same intr_context since
3304 * there is only one vector.
3306 intr_context->intr = 0;
3307 intr_context->qdev = qdev;
3308 /*
3309 * We set up each vector's enable/disable/read bits so
3310 * there are no bit/mask calculations in the critical path.
3311 */
3312 intr_context->intr_en_mask =
3313 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3314 intr_context->intr_dis_mask =
3315 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3316 INTR_EN_TYPE_DISABLE;
3317 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3318 /* Experience shows that when using INTx interrupts,
3319 * the device does not always auto-mask INTR_EN_EN.
3320 * Moreover, masking INTR_EN_EN manually does not
3321 * immediately prevent interrupt generation.
3323 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3325 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3327 intr_context->intr_read_mask =
3328 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3330 * Single interrupt means one handler for all rings.
3332 intr_context->handler = qlge_isr;
3333 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3334 /* Set up this vector's bit-mask that indicates
3335 * which queues it services. In this case there is
3336 * a single vector so it will service all RSS and
3337 * TX completion rings.
3339 qlge_set_irq_mask(qdev, intr_context);
3341 /* Tell the TX completion rings which MSIx vector
3342 * they will be using.
3344 qlge_set_tx_vect(qdev);
3347 static void qlge_free_irq(struct qlge_adapter *qdev)
3350 struct intr_context *intr_context = &qdev->intr_context[0];
3352 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3353 if (intr_context->hooked) {
3354 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3355 free_irq(qdev->msi_x_entry[i].vector,
3358 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3362 qlge_disable_msix(qdev);
3365 static int qlge_request_irq(struct qlge_adapter *qdev)
3369 struct pci_dev *pdev = qdev->pdev;
3370 struct intr_context *intr_context = &qdev->intr_context[0];
3372 qlge_resolve_queues_to_irqs(qdev);
3374 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3375 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3376 status = request_irq(qdev->msi_x_entry[i].vector,
3377 intr_context->handler,
3382 netif_err(qdev, ifup, qdev->ndev,
3383 "Failed request for MSIX interrupt %d.\n",
3388 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3389 "trying msi or legacy interrupts.\n");
3390 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3391 "%s: irq = %d.\n", __func__, pdev->irq);
3392 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3393 "%s: context->name = %s.\n", __func__,
3394 intr_context->name);
3395 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3396 "%s: dev_id = 0x%p.\n", __func__,
3397 &qdev->rx_ring[0]);
3398 status =
3399 request_irq(pdev->irq, qlge_isr,
3400 test_bit(QL_MSI_ENABLED, &qdev->flags)
3401 ? 0
3402 : IRQF_SHARED,
3403 intr_context->name, &qdev->rx_ring[0]);
3404 if (status)
3405 goto err_irq;
3407 netif_err(qdev, ifup, qdev->ndev,
3408 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3409 intr_context->name);
3411 intr_context->hooked = 1;
3412 }
3413 return 0;
3414 err_irq:
3415 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3416 qlge_free_irq(qdev);
3417 return status;
3418 }
3420 static int qlge_start_rss(struct qlge_adapter *qdev)
3422 static const u8 init_hash_seed[] = {
3423 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3424 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3425 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3426 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3427 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3429 struct ricb *ricb = &qdev->ricb;
3432 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3434 memset((void *)ricb, 0, sizeof(*ricb));
3436 ricb->base_cq = RSS_L4K;
3437 ricb->flags =
3438 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3439 ricb->mask = cpu_to_le16((u16)(0x3ff));
3442 * Fill out the Indirection Table.
3444 for (i = 0; i < 1024; i++)
3445 hash_id[i] = (i & (qdev->rss_ring_count - 1));
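/* The 1024-entry indirection table spreads the RSS hash over the
 * inbound rings. Note that the (i & (rss_ring_count - 1)) masking only
 * distributes entries evenly when rss_ring_count is a power of two.
 */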
3447 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3448 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3450 status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3452 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3458 static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3462 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3465 /* Clear all the entries in the routing table. */
3466 for (i = 0; i < 16; i++) {
3467 status = qlge_set_routing_reg(qdev, i, 0, 0);
3469 netif_err(qdev, ifup, qdev->ndev,
3470 "Failed to init routing register for CAM packets.\n");
3474 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3478 /* Initialize the frame-to-queue routing. */
3479 static int qlge_route_initialize(struct qlge_adapter *qdev)
3483 /* Clear all the entries in the routing table. */
3484 status = qlge_clear_routing_entries(qdev);
3488 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3492 status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3493 RT_IDX_IP_CSUM_ERR, 1);
3495 netif_err(qdev, ifup, qdev->ndev,
3496 "Failed to init routing register for IP CSUM error packets.\n");
3499 status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3500 RT_IDX_TU_CSUM_ERR, 1);
3502 netif_err(qdev, ifup, qdev->ndev,
3503 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3506 status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3508 netif_err(qdev, ifup, qdev->ndev,
3509 "Failed to init routing register for broadcast packets.\n");
3512 /* If we have more than one inbound queue, then turn on RSS in the
3513 * chip.
3514 */
3515 if (qdev->rss_ring_count > 1) {
3516 status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3517 RT_IDX_RSS_MATCH, 1);
3519 netif_err(qdev, ifup, qdev->ndev,
3520 "Failed to init routing register for MATCH RSS packets.\n");
3525 status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3528 netif_err(qdev, ifup, qdev->ndev,
3529 "Failed to init routing register for CAM packets.\n");
3531 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3535 int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3539 /* Check if the link is up and use that to
3540 * determine whether we are setting or clearing
3541 * the MAC address in the CAM.
3542 */
3543 set = qlge_read32(qdev, STS);
3544 set &= qdev->port_link_up;
3545 status = qlge_set_mac_addr(qdev, set);
3547 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3551 status = qlge_route_initialize(qdev);
3553 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3558 static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3565 * Set up the System register to halt on errors.
3567 value = SYS_EFE | SYS_FAE;
3569 qlge_write32(qdev, SYS, mask | value);
3571 /* Set the default queue, and VLAN behavior. */
3572 value = NIC_RCV_CFG_DFQ;
3573 mask = NIC_RCV_CFG_DFQ_MASK;
3574 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3575 value |= NIC_RCV_CFG_RV;
3576 mask |= (NIC_RCV_CFG_RV << 16);
3578 qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3580 /* Set the MPI interrupt to enabled. */
3581 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
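/* Register write convention used here and below: the upper 16 bits of
 * the written value act as a write mask for the lower 16, so
 * (bit << 16) | bit sets a bit, while (bit << 16) alone clears it and
 * leaves the remaining bits untouched.
 */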
3583 /* Enable the function, set pagesize, enable error checking. */
3584 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3585 FSC_EC | FSC_VM_PAGE_4K;
3586 value |= SPLT_SETTING;
3588 /* Set/clear header splitting. */
3589 mask = FSC_VM_PAGESIZE_MASK |
3590 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3591 qlge_write32(qdev, FSC, mask | value);
3593 qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3595 /* Set RX packet routing to use port/pci function on which the
3596 * packet arrived on in addition to usual frame routing.
3597 * This is helpful on bonding where both interfaces can have
3598 * the same MAC address.
3600 qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3601 /* Reroute all packets to our Interface.
3602 * They may have been routed to MPI firmware
3605 value = qlge_read32(qdev, MGMT_RCV_CFG);
3606 value &= ~MGMT_RCV_CFG_RM;
3607 mask = 0xffff0000;
3609 /* Sticky reg needs clearing due to WOL. */
3610 qlge_write32(qdev, MGMT_RCV_CFG, mask);
3611 qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3613 /* Default WOL is enabled on Mezz cards */
3614 if (qdev->pdev->subsystem_device == 0x0068 ||
3615 qdev->pdev->subsystem_device == 0x0180)
3616 qdev->wol = WAKE_MAGIC;
3618 /* Start up the rx queues. */
3619 for (i = 0; i < qdev->rx_ring_count; i++) {
3620 status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3622 netif_err(qdev, ifup, qdev->ndev,
3623 "Failed to start rx ring[%d].\n", i);
3628 /* If there is more than one inbound completion queue
3629 * then download a RICB to configure RSS.
3631 if (qdev->rss_ring_count > 1) {
3632 status = qlge_start_rss(qdev);
3634 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3639 /* Start up the tx queues. */
3640 for (i = 0; i < qdev->tx_ring_count; i++) {
3641 status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3643 netif_err(qdev, ifup, qdev->ndev,
3644 "Failed to start tx ring[%d].\n", i);
3649 /* Initialize the port and set the max framesize. */
3650 status = qdev->nic_ops->port_initialize(qdev);
3652 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3654 /* Set up the MAC address and frame routing filter. */
3655 status = qlge_cam_route_initialize(qdev);
3657 netif_err(qdev, ifup, qdev->ndev,
3658 "Failed to init CAM/Routing tables.\n");
3662 /* Start NAPI for the RSS queues. */
3663 for (i = 0; i < qdev->rss_ring_count; i++)
3664 napi_enable(&qdev->rx_ring[i].napi);
3669 /* Issue soft reset to chip. */
3670 static int qlge_adapter_reset(struct qlge_adapter *qdev)
3674 unsigned long end_jiffies;
3676 /* Clear all the entries in the routing table. */
3677 status = qlge_clear_routing_entries(qdev);
3679 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3683 /* Check if the bit is set, then skip the mailbox command and
3684 * clear the bit; otherwise we are in the normal reset process.
3686 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3687 /* Stop management traffic. */
3688 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3690 /* Wait for the NIC and MGMNT FIFOs to empty. */
3691 qlge_wait_fifo_empty(qdev);
3693 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3696 qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3698 end_jiffies = jiffies + usecs_to_jiffies(30);
3699 do {
3700 value = qlge_read32(qdev, RST_FO);
3701 if ((value & RST_FO_FR) == 0)
3702 break;
3703 cpu_relax();
3704 } while (time_before(jiffies, end_jiffies));
3706 if (value & RST_FO_FR) {
3707 netif_err(qdev, ifdown, qdev->ndev,
3708 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3709 status = -ETIMEDOUT;
3712 /* Resume management traffic. */
3713 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3717 static void qlge_display_dev_info(struct net_device *ndev)
3719 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3721 netif_info(qdev, probe, qdev->ndev,
3722 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3725 qdev->chip_rev_id & 0x0000000f,
3726 qdev->chip_rev_id >> 4 & 0x0000000f,
3727 qdev->chip_rev_id >> 8 & 0x0000000f,
3728 qdev->chip_rev_id >> 12 & 0x0000000f);
3729 netif_info(qdev, probe, qdev->ndev,
3730 "MAC address %pM\n", ndev->dev_addr);
3733 static int qlge_wol(struct qlge_adapter *qdev)
3736 u32 wol = MB_WOL_DISABLE;
3738 /* The CAM is still intact after a reset, but if we
3739 * are doing WOL, then we may need to program the
3740 * routing regs. We would also need to issue the mailbox
3741 * commands to instruct the MPI what to do per the ethtool
3742 * settings.
3743 */
3745 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3746 WAKE_MCAST | WAKE_BCAST)) {
3747 netif_err(qdev, ifdown, qdev->ndev,
3748 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3753 if (qdev->wol & WAKE_MAGIC) {
3754 status = qlge_mb_wol_set_magic(qdev, 1);
3756 netif_err(qdev, ifdown, qdev->ndev,
3757 "Failed to set magic packet on %s.\n",
3761 netif_info(qdev, drv, qdev->ndev,
3762 "Enabled magic packet successfully on %s.\n",
3765 wol |= MB_WOL_MAGIC_PKT;
3769 wol |= MB_WOL_MODE_ON;
3770 status = qlge_mb_wol_mode(qdev, wol);
3771 netif_err(qdev, drv, qdev->ndev,
3772 "WOL %s (wol code 0x%x) on %s\n",
3773 (status == 0) ? "Successfully set" : "Failed",
3774 wol, qdev->ndev->name);
3780 static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3782 /* Don't kill the reset worker thread if we
3783 * are in the process of recovery.
3785 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3786 cancel_delayed_work_sync(&qdev->asic_reset_work);
3787 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3788 cancel_delayed_work_sync(&qdev->mpi_work);
3789 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3790 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3793 static int qlge_adapter_down(struct qlge_adapter *qdev)
3797 qlge_link_off(qdev);
3799 qlge_cancel_all_work_sync(qdev);
3801 for (i = 0; i < qdev->rss_ring_count; i++)
3802 napi_disable(&qdev->rx_ring[i].napi);
3804 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3806 qlge_disable_interrupts(qdev);
3808 qlge_tx_ring_clean(qdev);
3810 /* Call netif_napi_del() from common point. */
3811 for (i = 0; i < qdev->rss_ring_count; i++)
3812 netif_napi_del(&qdev->rx_ring[i].napi);
3814 status = qlge_adapter_reset(qdev);
3816 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3818 qlge_free_rx_buffers(qdev);
3823 static int qlge_adapter_up(struct qlge_adapter *qdev)
3827 err = qlge_adapter_initialize(qdev);
3829 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3832 set_bit(QL_ADAPTER_UP, &qdev->flags);
3833 qlge_alloc_rx_buffers(qdev);
3834 /* If the port is initialized and the
3835 * link is up then turn on the carrier.
3837 if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3838 (qlge_read32(qdev, STS) & qdev->port_link_up))
3839 qlge_link_on(qdev);
3840 /* Restore rx mode. */
3841 clear_bit(QL_ALLMULTI, &qdev->flags);
3842 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3843 qlge_set_multicast_list(qdev->ndev);
3845 /* Restore vlan setting. */
3846 qlge_restore_vlan(qdev);
3848 qlge_enable_interrupts(qdev);
3849 qlge_enable_all_completion_interrupts(qdev);
3850 netif_tx_start_all_queues(qdev->ndev);
3854 qlge_adapter_reset(qdev);
3858 static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3860 qlge_free_mem_resources(qdev);
3861 qlge_free_irq(qdev);
3864 static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3866 if (qlge_alloc_mem_resources(qdev)) {
3867 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3870 return qlge_request_irq(qdev);
3873 static int qlge_close(struct net_device *ndev)
3875 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3878 /* If we hit pci_channel_io_perm_failure
3879 * failure condition, then we already
3880 * brought the adapter down.
3882 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3883 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3884 clear_bit(QL_EEH_FATAL, &qdev->flags);
3889 * Wait for device to recover from a reset.
3890 * (Rarely happens, but possible.)
3892 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3893 msleep(1);
3895 /* Make sure refill_work doesn't re-enable napi */
3896 for (i = 0; i < qdev->rss_ring_count; i++)
3897 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3899 qlge_adapter_down(qdev);
3900 qlge_release_adapter_resources(qdev);
3904 static void qlge_set_lb_size(struct qlge_adapter *qdev)
3906 if (qdev->ndev->mtu <= 1500)
3907 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3909 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3910 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3913 static int qlge_configure_rings(struct qlge_adapter *qdev)
3916 struct rx_ring *rx_ring;
3917 struct tx_ring *tx_ring;
3918 int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3920 /* In a perfect world we have one RSS ring for each CPU
3921 * and each has its own vector. To do that we ask for
3922 * cpu_cnt vectors. qlge_enable_msix() will adjust the
3923 * vector count to what we actually get. We then
3924 * allocate an RSS ring for each.
3925 * Essentially, we are doing min(cpu_count, msix_vector_count).
3927 qdev->intr_count = cpu_cnt;
3928 qlge_enable_msix(qdev);
3929 /* Adjust the RSS ring count to the actual vector count. */
3930 qdev->rss_ring_count = qdev->intr_count;
3931 qdev->tx_ring_count = cpu_cnt;
3932 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
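/* Worked example: on an 8-CPU machine that is granted all 8 MSI-X
 * vectors, this yields rss_ring_count = 8 and tx_ring_count = 8, hence
 * rx_ring_count = 16 (8 RSS rings plus 8 TX completion rings).
 */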
3934 for (i = 0; i < qdev->tx_ring_count; i++) {
3935 tx_ring = &qdev->tx_ring[i];
3936 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3937 tx_ring->qdev = qdev;
3939 tx_ring->wq_len = qdev->tx_ring_size;
3941 tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3944 * The completion queue ID for the tx rings start
3945 * immediately after the rss rings.
3947 tx_ring->cq_id = qdev->rss_ring_count + i;
3950 for (i = 0; i < qdev->rx_ring_count; i++) {
3951 rx_ring = &qdev->rx_ring[i];
3952 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3953 rx_ring->qdev = qdev;
3955 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3956 if (i < qdev->rss_ring_count) {
3958 * Inbound (RSS) queues.
3960 rx_ring->cq_len = qdev->rx_ring_size;
3962 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3963 rx_ring->lbq.type = QLGE_LB;
3964 rx_ring->sbq.type = QLGE_SB;
3965 INIT_DELAYED_WORK(&rx_ring->refill_work,
3969 * Outbound queue handles outbound completions only.
3971 /* outbound cq is same size as tx_ring it services. */
3972 rx_ring->cq_len = qdev->tx_ring_size;
3974 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3980 static int qlge_open(struct net_device *ndev)
3982 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3985 err = qlge_adapter_reset(qdev);
3989 qlge_set_lb_size(qdev);
3990 err = qlge_configure_rings(qdev);
3994 err = qlge_get_adapter_resources(qdev);
3998 err = qlge_adapter_up(qdev);
4005 qlge_release_adapter_resources(qdev);
4009 static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4013 /* Wait for an outstanding reset to complete. */
4014 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4015 int i = 4;
4017 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4018 netif_err(qdev, ifup, qdev->ndev,
4019 "Waiting for adapter UP...\n");
4020 ssleep(1);
4021 }
4023 if (!i) {
4024 netif_err(qdev, ifup, qdev->ndev,
4025 "Timed out waiting for adapter UP\n");
4026 return -ETIMEDOUT;
4027 }
4028 }
4030 status = qlge_adapter_down(qdev);
4034 qlge_set_lb_size(qdev);
4036 status = qlge_adapter_up(qdev);
4042 netif_alert(qdev, ifup, qdev->ndev,
4043 "Driver up/down cycle failed, closing device.\n");
4044 set_bit(QL_ADAPTER_UP, &qdev->flags);
4045 dev_close(qdev->ndev);
4049 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4051 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4054 if (ndev->mtu == 1500 && new_mtu == 9000)
4055 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4056 else if (ndev->mtu == 9000 && new_mtu == 1500)
4057 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4061 queue_delayed_work(qdev->workqueue,
4062 &qdev->mpi_port_cfg_work, 3 * HZ);
4064 ndev->mtu = new_mtu;
4066 if (!netif_running(qdev->ndev))
4069 status = qlge_change_rx_buffers(qdev);
4071 netif_err(qdev, ifup, qdev->ndev,
4072 "Changing MTU failed.\n");
4078 static struct net_device_stats *qlge_get_stats(struct net_device
4081 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4082 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4083 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4084 unsigned long pkts, mcast, dropped, errors, bytes;
4088 pkts = mcast = dropped = errors = bytes = 0;
4089 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4090 pkts += rx_ring->rx_packets;
4091 bytes += rx_ring->rx_bytes;
4092 dropped += rx_ring->rx_dropped;
4093 errors += rx_ring->rx_errors;
4094 mcast += rx_ring->rx_multicast;
4096 ndev->stats.rx_packets = pkts;
4097 ndev->stats.rx_bytes = bytes;
4098 ndev->stats.rx_dropped = dropped;
4099 ndev->stats.rx_errors = errors;
4100 ndev->stats.multicast = mcast;
4103 pkts = errors = bytes = 0;
4104 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4105 pkts += tx_ring->tx_packets;
4106 bytes += tx_ring->tx_bytes;
4107 errors += tx_ring->tx_errors;
4109 ndev->stats.tx_packets = pkts;
4110 ndev->stats.tx_bytes = bytes;
4111 ndev->stats.tx_errors = errors;
4112 return &ndev->stats;
4115 static void qlge_set_multicast_list(struct net_device *ndev)
4117 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4118 struct netdev_hw_addr *ha;
4121 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4125 * Set or clear promiscuous mode if a
4126 * transition is taking place.
4128 if (ndev->flags & IFF_PROMISC) {
4129 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4130 if (qlge_set_routing_reg
4131 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4132 netif_err(qdev, hw, qdev->ndev,
4133 "Failed to set promiscuous mode.\n");
4135 set_bit(QL_PROMISCUOUS, &qdev->flags);
4139 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4140 if (qlge_set_routing_reg
4141 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4142 netif_err(qdev, hw, qdev->ndev,
4143 "Failed to clear promiscuous mode.\n");
4145 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4151 * Set or clear all multicast mode if a
4152 * transition is taking place.
4154 if ((ndev->flags & IFF_ALLMULTI) ||
4155 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4156 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4157 if (qlge_set_routing_reg
4158 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4159 netif_err(qdev, hw, qdev->ndev,
4160 "Failed to set all-multi mode.\n");
4162 set_bit(QL_ALLMULTI, &qdev->flags);
4166 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4167 if (qlge_set_routing_reg
4168 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4169 netif_err(qdev, hw, qdev->ndev,
4170 "Failed to clear all-multi mode.\n");
4172 clear_bit(QL_ALLMULTI, &qdev->flags);
4177 if (!netdev_mc_empty(ndev)) {
4178 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4182 netdev_for_each_mc_addr(ha, ndev) {
4183 if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4184 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4185 netif_err(qdev, hw, qdev->ndev,
4186 "Failed to load multicast address.\n");
4187 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4192 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4193 if (qlge_set_routing_reg
4194 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4195 netif_err(qdev, hw, qdev->ndev,
4196 "Failed to set multicast match mode.\n");
4198 set_bit(QL_ALLMULTI, &qdev->flags);
4202 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4205 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4207 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4208 struct sockaddr *addr = p;
4211 if (!is_valid_ether_addr(addr->sa_data))
4212 return -EADDRNOTAVAIL;
4213 eth_hw_addr_set(ndev, addr->sa_data);
4214 /* Update local copy of current mac address. */
4215 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4217 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4220 status = qlge_set_mac_addr_reg(qdev, (const u8 *)ndev->dev_addr,
4221 MAC_ADDR_TYPE_CAM_MAC,
4222 qdev->func * MAX_CQ);
4224 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4225 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4229 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4231 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4233 qlge_queue_asic_error(qdev);
4236 static void qlge_asic_reset_work(struct work_struct *work)
4238 struct qlge_adapter *qdev =
4239 container_of(work, struct qlge_adapter, asic_reset_work.work);
4243 status = qlge_adapter_down(qdev);
4247 status = qlge_adapter_up(qdev);
4251 /* Restore rx mode. */
4252 clear_bit(QL_ALLMULTI, &qdev->flags);
4253 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4254 qlge_set_multicast_list(qdev->ndev);
4259 netif_alert(qdev, ifup, qdev->ndev,
4260 "Driver up/down cycle failed, closing device\n");
4262 set_bit(QL_ADAPTER_UP, &qdev->flags);
4263 dev_close(qdev->ndev);
4267 static const struct nic_operations qla8012_nic_ops = {
4268 .get_flash = qlge_get_8012_flash_params,
4269 .port_initialize = qlge_8012_port_initialize,
4272 static const struct nic_operations qla8000_nic_ops = {
4273 .get_flash = qlge_get_8000_flash_params,
4274 .port_initialize = qlge_8000_port_initialize,
4277 /* Find the pcie function number for the other NIC
4278 * on this chip. Since both NIC functions share a
4279 * common firmware we have the lowest enabled function
4280 * do any common work. Examples would be resetting
4281 * after a fatal firmware error, or doing a firmware
4282 * coredump.
4283 */
4284 static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4288 u32 nic_func1, nic_func2;
4290 status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4295 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4296 MPI_TEST_NIC_FUNC_MASK);
4297 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4298 MPI_TEST_NIC_FUNC_MASK);
4300 if (qdev->func == nic_func1)
4301 qdev->alt_func = nic_func2;
4302 else if (qdev->func == nic_func2)
4303 qdev->alt_func = nic_func1;
4310 static int qlge_get_board_info(struct qlge_adapter *qdev)
4311 {
4312 int status;
4314 qdev->func =
4315 (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4316 if (qdev->func > 3)
4317 return -EIO;
4319 status = qlge_get_alt_pcie_func(qdev);
4320 if (status)
4321 return status;
4323 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4324 if (qdev->port) {
4325 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4326 qdev->port_link_up = STS_PL1;
4327 qdev->port_init = STS_PI1;
4328 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4329 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4330 } else {
4331 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4332 qdev->port_link_up = STS_PL0;
4333 qdev->port_init = STS_PI0;
4334 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4335 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4336 }
4337 qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4338 qdev->device_id = qdev->pdev->device;
4339 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4340 qdev->nic_ops = &qla8012_nic_ops;
4341 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4342 qdev->nic_ops = &qla8000_nic_ops;
4343 return status;
4344 }
4346 static void qlge_release_all(struct pci_dev *pdev)
4348 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4350 if (qdev->workqueue) {
4351 destroy_workqueue(qdev->workqueue);
4352 qdev->workqueue = NULL;
4356 iounmap(qdev->reg_base);
4357 if (qdev->doorbell_area)
4358 iounmap(qdev->doorbell_area);
4359 vfree(qdev->mpi_coredump);
4360 pci_release_regions(pdev);
4363 static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
4366 struct net_device *ndev = qdev->ndev;
4369 err = pci_enable_device(pdev);
4371 dev_err(&pdev->dev, "PCI device enable failed.\n");
4376 pci_set_drvdata(pdev, qdev);
4378 /* Set PCIe read request size */
4379 err = pcie_set_readrq(pdev, 4096);
4381 dev_err(&pdev->dev, "Set readrq failed.\n");
4382 goto err_disable_pci;
4385 err = pci_request_regions(pdev, DRV_NAME);
4387 dev_err(&pdev->dev, "PCI region request failed.\n");
4388 goto err_disable_pci;
	pci_set_master(pdev);
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_release_pci;
	}
	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
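
	/* BAR 1 holds the chip control/status registers, BAR 3 the doorbell
	 * registers; both stay mapped for the lifetime of the adapter.
	 */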
	qdev->reg_base =
		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_release_pci;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_iounmap_base;
	}
	err = qlge_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_iounmap_doorbell;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->stats_lock);
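
	/* struct qlge_mpi_coredump is large, so the buffer is only allocated
	 * when the qlge_mpi_coredump module parameter explicitly requests it.
	 */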
	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct qlge_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_iounmap_doorbell;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* Make sure the EEPROM is good. */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_free_mpi_coredump;
	}
	/* Keep a local copy of the current MAC address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
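
	/* The reset/MPI work items must be able to make forward progress
	 * under memory pressure (WQ_MEM_RECLAIM), and an ordered workqueue
	 * ensures they never run concurrently with one another.
	 */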
	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
						  ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_free_mpi_coredump;
	}

	INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);
	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_free_mpi_coredump:
	vfree(qdev->mpi_coredump);
err_iounmap_doorbell:
	iounmap(qdev->doorbell_area);
err_iounmap_base:
	iounmap(qdev->reg_base);
err_release_pci:
	pci_release_regions(pdev);
err_disable_pci:
	pci_disable_device(pdev);

	return err;
}
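
/* net_device callbacks, attached to the freshly allocated netdev in
 * qlge_probe().
 */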
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
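
/* Deferrable watchdog timer: sample the STS register every five seconds so
 * a dead PCI channel (EEH) is noticed even while the interface is idle.
 */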
static void qlge_timer(struct timer_list *t)
{
	struct qlge_adapter *qdev = from_timer(qdev, t, timer);
	u32 var = 0;

	var = qlge_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
static const struct devlink_ops qlge_devlink_ops;
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct qlge_netdev_priv *ndev_priv;
	struct qlge_adapter *qdev = NULL;
	struct net_device *ndev = NULL;
	struct devlink *devlink;
	static int cards_found;
	int err;

	devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
				&pdev->dev);
	if (!devlink)
		return -ENOMEM;

	qdev = devlink_priv(devlink);

	ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev) {
		err = -ENOMEM;
		goto devlink_free;
	}
	ndev_priv = netdev_priv(ndev);
	ndev_priv->qdev = qdev;
	ndev_priv->ndev = ndev;
	qdev->ndev = ndev;
	err = qlge_init_device(pdev, qdev, cards_found);
	if (err < 0)
		goto netdev_free;

	SET_NETDEV_DEV(ndev, &pdev->dev);
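
	/* Offloads supported by the ASIC; listing them in hw_features keeps
	 * them user-toggleable through ethtool.
	 */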
	ndev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM |
		NETIF_F_TSO |
		NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* VLAN devices get the same features, except the VLAN offloads themselves. */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;
	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	/* MTU range: this driver only supports 1500 or 9000, so this only
	 * filters out values above or below, and we'll rely on
	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed.
	 */
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;
	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		goto netdev_free;
	}

	err = qlge_health_create_reporters(qdev);
	if (err)
		goto unregister_netdev;

	/* Start up the timer to trigger EEH if
	 * the board stays in reset.
	 */
	timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	qlge_link_off(qdev);
	qlge_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;

	devlink_register(devlink);
	return 0;

unregister_netdev:
	unregister_netdev(ndev);
netdev_free:
	qlge_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);

devlink_free:
	devlink_free(devlink);

	return err;
}
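
/* Exported for the ethtool loopback self-test: transmit reuses the normal
 * xmit path and completion reuses the normal inbound ring cleanup.
 */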
netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}
int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return qlge_clean_inbound_rx_ring(rx_ring, budget);
}
static void qlge_remove(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	struct devlink *devlink = priv_to_devlink(qdev);

	devlink_unregister(devlink);
	del_timer_sync(&qdev->timer);
	qlge_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	qlge_release_all(pdev);
	pci_disable_device(pdev);
	devlink_health_reporter_destroy(qdev->reporter);
	devlink_free(devlink);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void qlge_eeh_close(struct net_device *ndev)
{
	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
	int i;

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* The caller has already stopped the timer; cancel outstanding work. */
	qlge_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	qlge_tx_ring_clean(qdev);
	qlge_free_rx_buffers(qdev);
	qlge_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			qlge_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		qlge_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (qlge_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
	struct net_device *ndev = qdev->ndev;
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}
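
/* EEH hooks: error_detected() quiesces the device, slot_reset() brings the
 * hardware back up, and resume() restarts the data path.
 */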
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
static int __maybe_unused qlge_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;

	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;
	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = qlge_adapter_down(qdev);
		if (err)
			return err;
	}

	err = qlge_wol(qdev);
	if (err)
		return err;

	return 0;
}
static int __maybe_unused qlge_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct qlge_adapter *qdev;
	struct net_device *ndev;
	int err;

	qdev = pci_get_drvdata(pdev);
	ndev = qdev->ndev;

	pci_set_master(pdev);

	device_wakeup_disable(dev_d);

	if (netif_running(ndev)) {
		err = qlge_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
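
/* Shutdown reuses the suspend path to quiesce the hardware. */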
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(&pdev->dev);
}

static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
	.driver.pm = &qlge_pm_ops,
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);