1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic qlge NIC HBA Driver
4 * Copyright (c) 2003-2008 QLogic Corporation
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
/* Driver identity strings (exported for ethtool/modinfo) and module tags. */
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
/* netif_msg bitmask used when the "debug" parameter is left at -1. */
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
60 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
/* -1 means "use the driver defaults above" (per the inline comment). */
62 static int debug = -1; /* defaults above */
63 module_param(debug, int, 0664);
64 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* Interrupt delivery mode; defaults to MSI-X (see PARM_DESC encoding). */
69 static int qlge_irq_type = MSIX_IRQ;
70 module_param(qlge_irq_type, int, 0664);
71 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
/* Opt-in switches for MPI firmware core dumps; both default to off
 * so no dump memory is allocated unless explicitly requested.
 */
73 static int qlge_mpi_coredump;
74 module_param(qlge_mpi_coredump, int, 0);
75 MODULE_PARM_DESC(qlge_mpi_coredump,
76 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
78 static int qlge_force_coredump;
79 module_param(qlge_force_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_force_coredump,
81 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
/* PCI IDs this driver binds to: the QLogic 8012 and 8000 devices. */
83 static const struct pci_device_id qlge_pci_tbl[] = {
84 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
85 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
86 /* required last entry */
90 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* Forward declarations for routines defined later in this file. */
92 static int ql_wol(struct ql_adapter *);
93 static void qlge_set_multicast_list(struct net_device *);
94 static int ql_adapter_down(struct ql_adapter *);
95 static int ql_adapter_up(struct ql_adapter *);
97 /* This hardware semaphore causes exclusive access to
98 * resources shared between the NIC driver, MPI firmware,
99 * FCOE firmware and the FC driver.
/* Translate the caller's semaphore resource mask into the matching
 * SEM_SET request bits, post the request, then read SEM back.
 * Returns 0 when the read-back shows our sem_bits set (i.e. the
 * semaphore was acquired), non-zero otherwise.
 */
101 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
106 case SEM_XGMAC0_MASK:
107 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
109 case SEM_XGMAC1_MASK:
110 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
113 sem_bits = SEM_SET << SEM_ICB_SHIFT;
115 case SEM_MAC_ADDR_MASK:
116 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
119 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
122 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
124 case SEM_RT_IDX_MASK:
125 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
127 case SEM_PROC_REG_MASK:
128 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
/* Unknown mask: only a driver bug can reach here, so shout. */
131 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
/* Request the semaphore, then test whether we actually hold it. */
135 ql_write32(qdev, SEM, sem_bits | sem_mask);
136 return !(ql_read32(qdev, SEM) & sem_bits);
/* Busy-wait variant of the semaphore grab: retry ql_sem_trylock()
 * (which returns 0 once the semaphore is held) up to 30 times
 * before giving up.
 */
139 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
141 unsigned int wait_count = 30;
144 if (!ql_sem_trylock(qdev, sem_mask))
147 } while (--wait_count);
/* Release a hardware semaphore: write only the mask bits (no SEM_SET),
 * then read SEM back to flush the posted write.
 */
151 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
153 ql_write32(qdev, SEM, sem_mask);
154 ql_read32(qdev, SEM); /* flush */
157 /* This function waits for a specific bit to come ready
158 * in a given register. It is used mostly by the initialize
159 * process, but is also used in kernel thread API such as
160 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
/* Poll @reg up to UDELAY_COUNT times (UDELAY_DELAY apart) until @bit
 * is set.  If @err_bit is seen first, the access is reported as an
 * error.  Falls through to the timeout alert when neither bit appears.
 */
162 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 for (count = 0; count < UDELAY_COUNT; count++) {
168 temp = ql_read32(qdev, reg);
170 /* check for errors */
171 if (temp & err_bit) {
172 netif_alert(qdev, probe, qdev->ndev,
173 "register 0x%.08x access error, value = 0x%.08x!.\n",
/* Ready bit set: the register access completed normally. */
176 } else if (temp & bit) {
179 udelay(UDELAY_DELAY);
181 netif_alert(qdev, probe, qdev->ndev,
182 "Timed out waiting for reg %x to come ready.\n", reg);
186 /* The CFG register is used to download TX and RX control blocks
187 * to the chip. This function waits for an operation to complete.
/* Poll the CFG register until the requested @bit condition is met,
 * bounded by UDELAY_COUNT iterations of UDELAY_DELAY each.
 */
189 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 for (count = 0; count < UDELAY_COUNT; count++) {
195 temp = ql_read32(qdev, CFG);
200 udelay(UDELAY_DELAY);
205 /* Used to issue init control blocks to hw. Maps control block,
206 * sets address, triggers download, waits for completion.
/* Download a control block to the chip: DMA-map @ptr (direction chosen
 * from the load-type bits), take the ICB semaphore, wait for CFG to be
 * idle, program the block's bus address into ICB_L/ICB_H, trigger the
 * download via CFG, wait for completion, then unlock and unmap.
 */
208 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
/* Loads (LRQ/LR/LCQ) send data to the device; other ops read back. */
217 if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
218 direction = DMA_TO_DEVICE;
220 direction = DMA_FROM_DEVICE;
222 map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
223 if (dma_mapping_error(&qdev->pdev->dev, map)) {
224 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232 status = ql_wait_cfg(qdev, bit);
234 netif_err(qdev, ifup, qdev->ndev,
235 "Timed out waiting for CFG to come ready.\n");
/* Hand the 64-bit bus address to the chip, low half then high. */
239 ql_write32(qdev, ICB_L, (u32)map);
240 ql_write32(qdev, ICB_H, (u32)(map >> 32));
242 mask = CFG_Q_MASK | (bit << 16);
243 value = bit | (q_id << CFG_Q_SHIFT);
244 ql_write32(qdev, CFG, (mask | value));
247 * Wait for the bit to clear after signaling hw.
249 status = ql_wait_cfg(qdev, bit);
251 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
253 dma_unmap_single(&qdev->pdev->dev, map, size, direction);
257 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
/* Read a MAC address entry back from the CAM through the
 * MAC_ADDR_IDX/MAC_ADDR_DATA indirect register pair.  Each 32-bit word
 * is fetched by waiting for MW (write-ready), issuing an indexed read
 * request (MAC_ADDR_RS), waiting for MR (read-ready), then reading
 * MAC_ADDR_DATA.  CAM_MAC entries get a third word read (the output
 * routing word).  VLAN/MULTI_FLTR readback is not implemented.
 */
258 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
265 case MAC_ADDR_TYPE_MULTI_MAC:
266 case MAC_ADDR_TYPE_CAM_MAC: {
267 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
270 ql_write32(qdev, MAC_ADDR_IDX,
271 (offset++) | /* offset */
272 (index << MAC_ADDR_IDX_SHIFT) | /* index */
273 MAC_ADDR_ADR | MAC_ADDR_RS |
275 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
278 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
279 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
282 ql_write32(qdev, MAC_ADDR_IDX,
283 (offset++) | /* offset */
284 (index << MAC_ADDR_IDX_SHIFT) | /* index */
285 MAC_ADDR_ADR | MAC_ADDR_RS |
287 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
290 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
/* CAM entries carry a third word (output/routing controls). */
291 if (type == MAC_ADDR_TYPE_CAM_MAC) {
292 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
296 ql_write32(qdev, MAC_ADDR_IDX,
297 (offset++) | /* offset */
299 << MAC_ADDR_IDX_SHIFT) | /* index */
301 MAC_ADDR_RS | type); /* type */
302 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
310 case MAC_ADDR_TYPE_VLAN:
311 case MAC_ADDR_TYPE_MULTI_FLTR:
313 netif_crit(qdev, ifup, qdev->ndev,
314 "Address type %d not yet supported.\n", type);
320 /* Set up a MAC, multicast or VLAN address for the
321 * inbound frame matching.
/* Program an inbound-match entry (multicast MAC, CAM MAC or VLAN bit)
 * through the MAC_ADDR_IDX/MAC_ADDR_DATA indirect pair.  The 6-byte
 * MAC is split big-endian style into a 16-bit "upper" word
 * (addr[0..1]) and a 32-bit "lower" word (addr[2..5]); each data write
 * is preceded by a wait for the MW ready bit.
 */
323 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
330 case MAC_ADDR_TYPE_MULTI_MAC: {
331 u32 upper = (addr[0] << 8) | addr[1];
332 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
335 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
338 ql_write32(qdev, MAC_ADDR_IDX,
339 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
341 ql_write32(qdev, MAC_ADDR_DATA, lower);
342 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
345 ql_write32(qdev, MAC_ADDR_IDX,
346 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
349 ql_write32(qdev, MAC_ADDR_DATA, upper);
350 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
353 case MAC_ADDR_TYPE_CAM_MAC: {
355 u32 upper = (addr[0] << 8) | addr[1];
356 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
358 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
361 ql_write32(qdev, MAC_ADDR_IDX,
362 (offset++) | /* offset */
363 (index << MAC_ADDR_IDX_SHIFT) | /* index */
365 ql_write32(qdev, MAC_ADDR_DATA, lower);
366 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
369 ql_write32(qdev, MAC_ADDR_IDX,
370 (offset++) | /* offset */
371 (index << MAC_ADDR_IDX_SHIFT) | /* index */
373 ql_write32(qdev, MAC_ADDR_DATA, upper);
374 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
/* Third CAM word: the output routing controls for this entry. */
377 ql_write32(qdev, MAC_ADDR_IDX,
378 (offset) | /* offset */
379 (index << MAC_ADDR_IDX_SHIFT) | /* index */
381 /* This field should also include the queue id
382 * and possibly the function id. Right now we hardcode
383 * the route field to NIC core.
385 cam_output = (CAM_OUT_ROUTE_NIC |
386 (qdev->func << CAM_OUT_FUNC_SHIFT) |
387 (0 << CAM_OUT_CQ_ID_SHIFT));
/* Ask hardware to strip the VLAN tag when ctag-rx is enabled. */
388 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
389 cam_output |= CAM_OUT_RV;
390 /* route to NIC core */
391 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
394 case MAC_ADDR_TYPE_VLAN: {
395 u32 enable_bit = *((u32 *)&addr[0]);
396 /* For VLAN, the addr actually holds a bit that
397 * either enables or disables the vlan id we are
398 * addressing. It's either MAC_ADDR_E on or off.
399 * That's bit-27 we're talking about.
401 status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
404 ql_write32(qdev, MAC_ADDR_IDX,
405 offset | /* offset */
406 (index << MAC_ADDR_IDX_SHIFT) | /* index */
408 enable_bit); /* enable/disable */
411 case MAC_ADDR_TYPE_MULTI_FLTR:
413 netif_crit(qdev, ifup, qdev->ndev,
414 "Address type %d not yet supported.\n", type);
420 /* Set or clear MAC address in hardware. We sometimes
421 * have to clear it to prevent wrong frame routing
422 * especially in a bonding environment.
/* Set (@set != 0) or clear the adapter's unicast MAC in the CAM.
 * Clearing writes an all-zero address; the CAM slot is indexed by
 * qdev->func * MAX_CQ and updated under the MAC_ADDR semaphore.
 */
424 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
427 char zero_mac_addr[ETH_ALEN];
431 addr = &qdev->current_mac_addr[0];
432 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
433 "Set Mac addr %pM\n", addr);
435 eth_zero_addr(zero_mac_addr);
436 addr = &zero_mac_addr[0];
437 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
438 "Clearing MAC address\n");
440 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
443 status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
444 MAC_ADDR_TYPE_CAM_MAC,
445 qdev->func * MAX_CQ);
446 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
448 netif_err(qdev, ifup, qdev->ndev,
449 "Failed to init mac address.\n");
/* Link-up handler: report carrier and (re)program our MAC into the CAM
 * so inbound frames are routed to us again.
 */
453 void ql_link_on(struct ql_adapter *qdev)
455 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
456 netif_carrier_on(qdev->ndev);
457 ql_set_mac_addr(qdev, 1);
/* Link-down handler: drop carrier and clear the CAM MAC entry to avoid
 * stale frame routing (see comment above ql_set_mac_addr re: bonding).
 */
460 void ql_link_off(struct ql_adapter *qdev)
462 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
463 netif_carrier_off(qdev->ndev);
464 ql_set_mac_addr(qdev, 0);
467 /* Get a specific frame routing value from the CAM.
468 * Used for debug and reg dump.
/* Read one of the frame-routing entries via the RT_IDX/RT_DATA pair:
 * wait for write-ready, request an indexed read (RT_IDX_RS), wait for
 * read-ready, then fetch the value from RT_DATA.
 */
470 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
474 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
478 ql_write32(qdev, RT_IDX,
479 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
480 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
483 *value = ql_read32(qdev, RT_DATA);
488 /* The NIC function for this chip has 16 routing indexes. Each one can be used
489 * to route different frame types to various inbound queues. We send broadcast/
490 * multicast/error frames to the default queue for slow handling,
491 * and CAM hit/RSS frames to the fast handling queues.
/* Build the RT_IDX descriptor for one routing slot from @mask, then
 * program it: RT_IDX selects slot/type (+E bit when enabling) and
 * RT_DATA carries the match mask (0 disables the entry).  Unknown
 * masks leave status at -EINVAL.
 */
493 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
496 int status = -EINVAL; /* Return error if no mask match. */
/* CAM hits go to the fast per-connection queues... */
502 value = RT_IDX_DST_CAM_Q | /* dest */
503 RT_IDX_TYPE_NICQ | /* type */
504 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
507 case RT_IDX_VALID: /* Promiscuous Mode frames. */
509 value = RT_IDX_DST_DFLT_Q | /* dest */
510 RT_IDX_TYPE_NICQ | /* type */
511 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
514 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
516 value = RT_IDX_DST_DFLT_Q | /* dest */
517 RT_IDX_TYPE_NICQ | /* type */
518 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
521 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
523 value = RT_IDX_DST_DFLT_Q | /* dest */
524 RT_IDX_TYPE_NICQ | /* type */
525 (RT_IDX_IP_CSUM_ERR_SLOT <<
526 RT_IDX_IDX_SHIFT); /* index */
529 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
531 value = RT_IDX_DST_DFLT_Q | /* dest */
532 RT_IDX_TYPE_NICQ | /* type */
533 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
534 RT_IDX_IDX_SHIFT); /* index */
537 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
539 value = RT_IDX_DST_DFLT_Q | /* dest */
540 RT_IDX_TYPE_NICQ | /* type */
541 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
544 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
546 value = RT_IDX_DST_DFLT_Q | /* dest */
547 RT_IDX_TYPE_NICQ | /* type */
548 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
551 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
553 value = RT_IDX_DST_DFLT_Q | /* dest */
554 RT_IDX_TYPE_NICQ | /* type */
555 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
/* ...and RSS-matched traffic spreads across the RSS queues. */
558 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
560 value = RT_IDX_DST_RSS | /* dest */
561 RT_IDX_TYPE_NICQ | /* type */
562 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
565 case 0: /* Clear the E-bit on an entry. */
567 value = RT_IDX_DST_DFLT_Q | /* dest */
568 RT_IDX_TYPE_NICQ | /* type */
569 (index << RT_IDX_IDX_SHIFT);/* index */
573 netif_err(qdev, ifup, qdev->ndev,
574 "Mask type %d not yet supported.\n", mask);
/* Program the slot: descriptor into RT_IDX, match mask into RT_DATA. */
580 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
583 value |= (enable ? RT_IDX_E : 0);
584 ql_write32(qdev, RT_IDX, value);
585 ql_write32(qdev, RT_DATA, enable ? mask : 0);
/* Globally enable interrupts: mask half selects INTR_EN_EI, value half
 * sets it.
 */
591 static void ql_enable_interrupts(struct ql_adapter *qdev)
593 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
/* Globally disable interrupts: select INTR_EN_EI in the mask half but
 * leave the value bit clear.
 */
596 static void ql_disable_interrupts(struct ql_adapter *qdev)
598 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
/* Unmask the completion interrupt for vector @intr using its
 * precomputed enable mask.
 */
601 static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
603 struct intr_context *ctx = &qdev->intr_context[intr];
605 ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
/* Mask the completion interrupt for vector @intr using its precomputed
 * disable mask.
 */
608 static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
610 struct intr_context *ctx = &qdev->intr_context[intr];
612 ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
/* Unmask every allocated completion interrupt vector. */
615 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
619 for (i = 0; i < qdev->intr_count; i++)
620 ql_enable_completion_interrupt(qdev, i);
/* Validate the cached flash image: the first 4 bytes must match the
 * expected signature @str, and the 16-bit word sum over @size words
 * must check out (the flash stores little-endian 16-bit words).
 */
623 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
627 __le16 *flash = (__le16 *)&qdev->flash;
629 status = strncmp((char *)&qdev->flash, str, 4);
631 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
635 for (i = 0; i < size; i++)
636 csum += le16_to_cpu(*flash++);
639 netif_err(qdev, ifup, qdev->ndev,
640 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
/* Read one 32-bit word from serial flash at @offset via the
 * FLASH_ADDR/FLASH_DATA pair: wait ready, post the read request, wait
 * ready again, then fetch the data (stored on flash as __le32, so the
 * cpu-endian register read is swapped back).
 */
645 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
648 /* wait for reg to come ready */
649 status = ql_wait_reg_rdy(qdev,
650 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
653 /* set up for reg read */
654 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
655 /* wait for reg to come ready */
656 status = ql_wait_reg_rdy(qdev,
657 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
660 /* This data is stored on flash as an array of
661 * __le32. Since ql_read32() returns cpu endian
662 * we need to swap it back.
664 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
/* Load and validate the 8000-series flash parameter block, then derive
 * the port MAC address from it.  The flash offset is per PCI function,
 * the whole read runs under the FLASH semaphore, and data_type1 == 2
 * selects the BOFM-modified address (mac_addr1) over the factory one.
 */
669 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
673 __le32 *p = (__le32 *)&qdev->flash;
677 /* Get flash offset for function and adjust
681 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
683 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
685 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
688 size = sizeof(struct flash_params_8000) / sizeof(u32);
689 for (i = 0; i < size; i++, p++) {
690 status = ql_read_flash_word(qdev, i + offset, p);
692 netif_err(qdev, ifup, qdev->ndev,
693 "Error reading flash.\n");
698 status = ql_validate_flash(qdev,
699 sizeof(struct flash_params_8000) /
703 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
708 /* Extract either manufacturer or BOFM modified
711 if (qdev->flash.flash_params_8000.data_type1 == 2)
713 qdev->flash.flash_params_8000.mac_addr1,
714 qdev->ndev->addr_len);
717 qdev->flash.flash_params_8000.mac_addr,
718 qdev->ndev->addr_len);
/* Reject all-zero/multicast addresses before committing to dev_addr. */
720 if (!is_valid_ether_addr(mac_addr)) {
721 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
726 memcpy(qdev->ndev->dev_addr,
728 qdev->ndev->addr_len);
731 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* Load and validate the 8012-series flash parameter block (second
 * function's copy follows the first in flash), then copy its MAC
 * address into the netdev.  Runs under the FLASH semaphore.
 */
735 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
739 __le32 *p = (__le32 *)&qdev->flash;
741 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
743 /* Second function's parameters follow the first
749 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
752 for (i = 0; i < size; i++, p++) {
753 status = ql_read_flash_word(qdev, i + offset, p);
755 netif_err(qdev, ifup, qdev->ndev,
756 "Error reading flash.\n");
762 status = ql_validate_flash(qdev,
763 sizeof(struct flash_params_8012) /
767 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
772 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
777 memcpy(qdev->ndev->dev_addr,
778 qdev->flash.flash_params_8012.mac_addr,
779 qdev->ndev->addr_len);
782 ql_sem_unlock(qdev, SEM_FLASH_MASK);
786 /* xgmac register are located behind the xgmac_addr and xgmac_data
787 * register pair. Each read/write requires us to wait for the ready
788 * bit before reading/writing the data.
/* Write an XGMAC register through the XGMAC_ADDR/XGMAC_DATA indirect
 * pair: wait for ready, stage the data, then write the address to
 * trigger the transfer.
 */
790 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
793 /* wait for reg to come ready */
794 status = ql_wait_reg_rdy(qdev,
795 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
798 /* write the data to the data reg */
799 ql_write32(qdev, XGMAC_DATA, data);
800 /* trigger the write */
801 ql_write32(qdev, XGMAC_ADDR, reg);
805 /* xgmac register are located behind the xgmac_addr and xgmac_data
806 * register pair. Each read/write requires us to wait for the ready
807 * bit before reading/writing the data.
/* Read an XGMAC register through the indirect pair: wait ready, post a
 * read request (XGMAC_ADDR_R), wait ready again, then fetch the data.
 */
809 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
812 /* wait for reg to come ready */
813 status = ql_wait_reg_rdy(qdev,
814 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
817 /* set up for reg read */
818 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
819 /* wait for reg to come ready */
820 status = ql_wait_reg_rdy(qdev,
821 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
825 *data = ql_read32(qdev, XGMAC_DATA);
830 /* This is used for reading the 64-bit statistics regs. */
/* Read a 64-bit XGMAC statistics counter as two 32-bit halves
 * (low word at @reg, high word at @reg + 4) and combine them.
 */
831 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
837 status = ql_read_xgmac_reg(qdev, reg, &lo);
841 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
845 *data = (u64)lo | ((u64)hi << 32);
/* 8000-series port init: query MPI firmware version/state via mailbox,
 * then defer the TX/RX frame-size configuration to a worker.
 */
851 static int ql_8000_port_initialize(struct ql_adapter *qdev)
855 * Get MPI firmware version for driver banner
858 status = ql_mb_about_fw(qdev);
861 status = ql_mb_get_fw_state(qdev);
864 /* Wake up a worker to get/set the TX/RX frame sizes. */
865 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
870 /* Take the MAC Core out of reset.
871 * Enable statistics counting.
872 * Take the transmitter/receiver out of reset.
873 * This functionality may be done in the MPI firmware at a
/* 8012-series port bring-up.  If another function holds the xgmac
 * semaphore it is already initializing the shared port, so just wait
 * for the port_init status bit.  Otherwise: pulse the MAC core reset,
 * enable jumbo + TX/RX statistics, enable the transmitter and
 * receiver, set jumbo frame sizes, publish port_init in STS, and
 * release the semaphore.
 */
876 static int ql_8012_port_initialize(struct ql_adapter *qdev)
881 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
882 /* Another function has the semaphore, so
883 * wait for the port init bit to come ready.
885 netif_info(qdev, link, qdev->ndev,
886 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
889 netif_crit(qdev, link, qdev->ndev,
890 "Port initialize timed out.\n");
895 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
896 /* Set the core reset. */
897 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
900 data |= GLOBAL_CFG_RESET;
901 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
905 /* Clear the core reset and turn on jumbo for receiver. */
906 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
907 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
908 data |= GLOBAL_CFG_TX_STAT_EN;
909 data |= GLOBAL_CFG_RX_STAT_EN;
910 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
914 /* Enable transmitter, and clear it's reset. */
915 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
918 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
919 data |= TX_CFG_EN; /* Enable the transmitter. */
920 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
924 /* Enable receiver and clear it's reset. */
925 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
928 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
929 data |= RX_CFG_EN; /* Enable the receiver. */
930 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
/* Jumbo frame size 0x2580 (9600 bytes) for both TX and RX params. */
936 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
940 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
944 /* Signal to the world that the port is enabled. */
945 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
947 ql_sem_unlock(qdev, qdev->xg_sem_mask);
/* Size in bytes of one large-buffer master page (PAGE_SIZE scaled by
 * the configured allocation order).
 */
951 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
953 return PAGE_SIZE << qdev->lbq_buf_order;
/* Pop the next descriptor from a buffer queue, advancing
 * next_to_clean with ring wrap-around.
 */
956 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
958 struct qlge_bq_desc *bq_desc;
960 bq_desc = &bq->queue[bq->next_to_clean];
961 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
/* Take the current large-buffer chunk off the ring and sync it for CPU
 * access.  When this chunk is the last one carved from its master
 * page, the whole page mapping is torn down here.
 */
966 static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
967 struct rx_ring *rx_ring)
969 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
971 dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
972 qdev->lbq_buf_size, DMA_FROM_DEVICE);
974 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
975 ql_lbq_block_size(qdev)) {
976 /* last chunk of the master page */
977 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
978 ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
984 /* Update an rx ring index. */
/* Advance the completion-queue consumer index and entry pointer,
 * wrapping both back to the start when the end of the ring is reached.
 */
985 static void ql_update_cq(struct rx_ring *rx_ring)
987 rx_ring->cnsmr_idx++;
988 rx_ring->curr_entry++;
989 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
990 rx_ring->cnsmr_idx = 0;
991 rx_ring->curr_entry = rx_ring->cq_base;
/* Publish the consumer index to hardware via the doorbell register. */
995 static void ql_write_cq_idx(struct rx_ring *rx_ring)
997 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
/* Human-readable names for the buffer-queue types, used in debug logs. */
1000 static const char * const bq_type_name[] = {
1005 /* return 0 or negative error */
/* Refill one small-buffer queue slot: allocate an skb with @gfp, map
 * it for DMA and publish the bus address into the descriptor.  A slot
 * that already holds an skb is left alone.  Returns 0 or a negative
 * errno.
 */
1006 static int qlge_refill_sb(struct rx_ring *rx_ring,
1007 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1009 struct ql_adapter *qdev = rx_ring->qdev;
1010 struct sk_buff *skb;
1012 if (sbq_desc->p.skb)
1015 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1016 "ring %u sbq: getting new skb for index %d.\n",
1017 rx_ring->cq_id, sbq_desc->index);
1019 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1022 skb_reserve(skb, QLGE_SB_PAD);
1024 sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1027 if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1028 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1029 dev_kfree_skb_any(skb);
1032 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1034 sbq_desc->p.skb = skb;
1038 /* return 0 or negative error */
/* Refill one large-buffer queue slot by carving the next
 * lbq_buf_size chunk out of the ring's shared master page.  A fresh
 * master page (compound, order lbq_buf_order) is allocated and
 * DMA-mapped when none is active; get_page() keeps the page alive
 * while multiple chunks reference it.  Returns 0 or a negative errno.
 */
1039 static int qlge_refill_lb(struct rx_ring *rx_ring,
1040 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1042 struct ql_adapter *qdev = rx_ring->qdev;
1043 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1045 if (!master_chunk->page) {
1047 dma_addr_t dma_addr;
1049 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1050 if (unlikely(!page))
1052 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1053 ql_lbq_block_size(qdev),
1055 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1056 __free_pages(page, qdev->lbq_buf_order);
1057 netif_err(qdev, drv, qdev->ndev,
1058 "PCI mapping failed.\n");
1061 master_chunk->page = page;
1062 master_chunk->va = page_address(page);
1063 master_chunk->offset = 0;
1064 rx_ring->chunk_dma_addr = dma_addr;
/* Hand the current chunk to this descriptor. */
1067 lbq_desc->p.pg_chunk = *master_chunk;
1068 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1069 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1070 lbq_desc->p.pg_chunk.offset);
1072 /* Adjust the master page chunk for next
1075 master_chunk->offset += qdev->lbq_buf_size;
/* Page exhausted: drop our handle so the next call allocates anew. */
1076 if (master_chunk->offset == ql_lbq_block_size(qdev)) {
1077 master_chunk->page = NULL;
1079 master_chunk->va += qdev->lbq_buf_size;
1080 get_page(master_chunk->page);
1086 /* return 0 or negative error */
/* Refill a buffer queue (small or large) up to the computed
 * refill_count, dispatching each slot to qlge_refill_sb()/_lb().  The
 * hardware producer doorbell is only rung when the index crosses a
 * QLGE_BQ_ALIGN boundary.  Returns 0 or a negative errno.
 */
1087 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1089 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1090 struct ql_adapter *qdev = rx_ring->qdev;
1091 struct qlge_bq_desc *bq_desc;
1096 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1101 i = bq->next_to_use;
1102 bq_desc = &bq->queue[i];
1105 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1106 "ring %u %s: try cleaning idx %d\n",
1107 rx_ring->cq_id, bq_type_name[bq->type], i);
1109 if (bq->type == QLGE_SB)
1110 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1112 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1114 netif_err(qdev, ifup, qdev->ndev,
1115 "ring %u %s: Could not get a page chunk, idx %d\n",
1116 rx_ring->cq_id, bq_type_name[bq->type], i);
/* Wrap back to the start of the ring. */
1123 bq_desc = &bq->queue[0];
1127 } while (refill_count);
1130 if (bq->next_to_use != i) {
/* Only ring the doorbell on an alignment-boundary crossing. */
1131 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1132 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1133 "ring %u %s: updating prod idx = %d.\n",
1134 rx_ring->cq_id, bq_type_name[bq->type],
1136 ql_write_db_reg(i, bq->prod_idx_db_reg);
1138 bq->next_to_use = i;
/* Top up both buffer queues of a ring.  If an allocation failed AND
 * the hardware is left with fewer buffers than the minimum needed to
 * receive one frame (2 small buffers; enough large buffers for a
 * 9000-byte MTU), schedule refill_work on the long-running workqueue
 * to retry after @delay.
 */
1144 static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1145 unsigned long delay)
1147 bool sbq_fail, lbq_fail;
1149 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1150 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1152 /* Minimum number of buffers needed to be able to receive at least one
1153 * frame of any format:
1154 * sbq: 1 for header + 1 for data
1155 * lbq: mtu 9000 / lb size
1156 * Below this, the queue might stall.
1158 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1159 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1160 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1161 /* Allocations can take a long time in certain cases (ex.
1162 * reclaim). Therefore, use a workqueue for long-running
1165 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1166 &rx_ring->refill_work, delay);
/* Deferred refill worker (scheduled by ql_update_buffer_queues when an
 * atomic refill failed): retries with GFP_KERNEL, then kicks NAPI in
 * case napi_disable() interrupted pending work.
 */
1169 static void qlge_slow_refill(struct work_struct *work)
1171 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1173 struct napi_struct *napi = &rx_ring->napi;
1176 ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1180 /* napi_disable() might have prevented incomplete napi work from being
1183 napi_schedule(napi);
1184 /* trigger softirq processing */
1188 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1189 * fails at some stage, or from the interrupt when a tx completes.
/* Undo the DMA mappings of a (partially) mapped TX descriptor.
 * Element 0 is the skb->data mapping and element 7 (when more than 7
 * segments were mapped) is the external sglist (OAL); both were mapped
 * with dma_map_single().  Everything else is a page fragment mapping.
 */
1191 static void ql_unmap_send(struct ql_adapter *qdev,
1192 struct tx_ring_desc *tx_ring_desc, int mapped)
1196 for (i = 0; i < mapped; i++) {
1197 if (i == 0 || (i == 7 && mapped > 7)) {
1199 * Unmap the skb->data area, or the
1200 * external sglist (AKA the Outbound
1201 * Address List (OAL)).
1202 * If its the zeroeth element, then it's
1203 * the skb->data area. If it's the 7th
1204 * element and there is more than 6 frags,
1208 netif_printk(qdev, tx_done, KERN_DEBUG,
1210 "unmapping OAL area.\n")
1212 dma_unmap_single(&qdev->pdev->dev,
1213 dma_unmap_addr(&tx_ring_desc->map[i],
1215 dma_unmap_len(&tx_ring_desc->map[i],
1219 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1220 "unmapping frag %d.\n", i);
1221 dma_unmap_page(&qdev->pdev->dev,
1222 dma_unmap_addr(&tx_ring_desc->map[i],
1224 dma_unmap_len(&tx_ring_desc->map[i],
1225 maplen), DMA_TO_DEVICE);
1231 /* Map the buffers for this transmit. This will return
1232 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
/* DMA-map an skb into the 8 tx_buf_desc slots of an outbound IOCB.
 * Slot 0 carries skb->data; when the skb has more than 7 fragments,
 * slot 7 is repurposed as a pointer to an external sglist (the OAL)
 * that holds the remaining fragments.  Unmap addresses/lengths are
 * recorded in tx_ring_desc->map[] so ql_unmap_send() can undo them.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY after unwinding on a
 * mapping failure.
 */
1234 static int ql_map_send(struct ql_adapter *qdev,
1235 struct ob_mac_iocb_req *mac_iocb_ptr,
1236 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1238 int len = skb_headlen(skb);
1240 int frag_idx, err, map_idx = 0;
1241 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1242 int frag_cnt = skb_shinfo(skb)->nr_frags;
1245 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1246 "frag_cnt = %d.\n", frag_cnt);
1249 * Map the skb buffer first.
1251 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1253 err = dma_mapping_error(&qdev->pdev->dev, map);
1255 netif_err(qdev, tx_queued, qdev->ndev,
1256 "PCI mapping failed with error: %d\n", err);
1258 return NETDEV_TX_BUSY;
1261 tbd->len = cpu_to_le32(len);
1262 tbd->addr = cpu_to_le64(map);
1263 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1264 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1268 * This loop fills the remainder of the 8 address descriptors
1269 * in the IOCB. If there are more than 7 fragments, then the
1270 * eighth address desc will point to an external list (OAL).
1271 * When this happens, the remainder of the frags will be stored
1274 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1275 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1278 if (frag_idx == 6 && frag_cnt > 7) {
1279 /* Let's tack on an sglist.
1280 * Our control block will now
1282 * iocb->seg[0] = skb->data
1283 * iocb->seg[1] = frag[0]
1284 * iocb->seg[2] = frag[1]
1285 * iocb->seg[3] = frag[2]
1286 * iocb->seg[4] = frag[3]
1287 * iocb->seg[5] = frag[4]
1288 * iocb->seg[6] = frag[5]
1289 * iocb->seg[7] = ptr to OAL (external sglist)
1290 * oal->seg[0] = frag[6]
1291 * oal->seg[1] = frag[7]
1292 * oal->seg[2] = frag[8]
1293 * oal->seg[3] = frag[9]
1294 * oal->seg[4] = frag[10]
1297 /* Tack on the OAL in the eighth segment of IOCB. */
1298 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1301 err = dma_mapping_error(&qdev->pdev->dev, map);
1303 netif_err(qdev, tx_queued, qdev->ndev,
1304 "PCI mapping outbound address list with error: %d\n",
1309 tbd->addr = cpu_to_le64(map);
1311 * The length is the number of fragments
1312 * that remain to be mapped times the length
1313 * of our sglist (OAL).
1316 cpu_to_le32((sizeof(struct tx_buf_desc) *
1317 (frag_cnt - frag_idx)) | TX_DESC_C);
1318 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1320 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1321 sizeof(struct oal));
/* Subsequent fragment descriptors are written into the OAL. */
1322 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1326 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1329 err = dma_mapping_error(&qdev->pdev->dev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping frags failed with error: %d.\n",
1337 tbd->addr = cpu_to_le64(map);
1338 tbd->len = cpu_to_le32(skb_frag_size(frag));
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1341 skb_frag_size(frag));
1344 /* Save the number of segments we've mapped. */
1345 tx_ring_desc->map_cnt = map_idx;
1346 /* Terminate the last segment. */
1347 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1348 return NETDEV_TX_OK;
1352 * If the first frag mapping failed, then i will be zero.
1353 * This causes the unmap of the skb->data area. Otherwise
1354 * we pass in the number of frags that mapped successfully
1355 * so they can be umapped.
1357 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1358 return NETDEV_TX_BUSY;
1361 /* Categorizing receive firmware frame errors */
1362 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1363 struct rx_ring *rx_ring)
1365 struct nic_stats *stats = &qdev->nic_stats;
1367 stats->rx_err_count++;
1368 rx_ring->rx_errors++;
1370 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1371 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1372 stats->rx_code_err++;
1374 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1375 stats->rx_oversize_err++;
1377 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1378 stats->rx_undersize_err++;
1380 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1381 stats->rx_preamble_err++;
1383 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1384 stats->rx_frame_len_err++;
1386 case IB_MAC_IOCB_RSP_ERR_CRC:
1387 stats->rx_crc_err++;
1394 * ql_update_mac_hdr_len - helper routine to update the mac header length
1395 * based on vlan tags if present
1397 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1398 struct ib_mac_iocb_rsp *ib_mac_rsp,
1399 void *page, size_t *len)
1403 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1405 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1407 /* Look for stacked vlan tags in ethertype field */
1408 if (tags[6] == ETH_P_8021Q &&
1409 tags[8] == ETH_P_8021Q)
1410 *len += 2 * VLAN_HLEN;
/* Process an inbound completion from an rx ring. */
/* GRO fast path: attach the received large-buffer page chunk directly to
 * the per-napi GRO skb as a fragment (no copy).  The caller only routes
 * frames here when the chip reported a good TCP checksum, which is why
 * CHECKSUM_UNNECESSARY is set unconditionally below.
 */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length, u16 vlan_id)
	struct sk_buff *skb;
	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		/* Drop our reference on the page we would have consumed. */
		put_page(lbq_desc->p.pg_chunk.page);
	napi->dev = qdev->ndev;
	skb = napi_get_frags(napi);
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
	prefetch(lbq_desc->p.pg_chunk.va);
	/* Append the chunk as the next fragment of the GRO skb. */
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* vlan_id == 0xffff is the caller's "no vlan tag" sentinel. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
/* Process an inbound completion from an rx ring. */
/* Non-TCP frame received into a single large-buffer page chunk: copy the
 * MAC header into a fresh skb, chain the remainder of the page as a
 * fragment, then hand the skb to the stack (GRO when checksummed).
 */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length, u16 vlan_id)
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	/* Header length; grows below if a vlan tag is left in the frame. */
	size_t hlen = ETH_HLEN;
	skb = netdev_alloc_skb(ndev, length);
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
	addr = lbq_desc->p.pg_chunk.va;
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
	/* Update the MAC header length*/
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
	/* Copy just the header portion; the payload stays in the page. */
	skb_put_data(skb, addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	/* Accept the HW checksum only if checksum offload is enabled and
	 * the IOCB reported no checksum error.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
				(struct iphdr *)((u8 *)addr + hlen);
			/* HW can't checksum fragmented UDP datagrams. */
			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     "UDP checksum done!\n");
	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* vlan_id == 0xffff is the caller's "no vlan tag" sentinel. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
		netif_receive_skb(skb);
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
/* Process an inbound completion from an rx ring. */
/* Small-buffer path: the whole frame fits in one small buffer.  The data
 * is copied into a freshly allocated skb so the small buffer can be
 * recycled, then the copy is passed up the stack.
 */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length, u16 vlan_id)
	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb, *new_skb;
	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
		rx_ring->rx_dropped++;
	/* Keep the IP header 4-byte aligned after the 14-byte MAC header. */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Make the DMA'd small buffer visible to the CPU before copying. */
	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
	skb_put_data(new_skb, skb->data, length);
	/* NOTE(review): the copy (new_skb) presumably replaces skb here in
	 * the elided lines — confirm against the full source.
	 */
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;
			/* HW can't checksum fragmented UDP datagrams. */
			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     "UDP checksum done!\n");
	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* vlan_id == 0xffff is the caller's "no vlan tag" sentinel. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
		netif_receive_skb(skb);
1656 static void ql_realign_skb(struct sk_buff *skb, int len)
1658 void *temp_addr = skb->data;
1660 /* Undo the skb_reserve(skb,32) we did before
1661 * giving to hardware, and realign data on
1662 * a 2-byte boundary.
1664 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1665 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1666 memmove(skb->data, temp_addr, len);
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 *
 * Returns the assembled skb, or NULL on allocation failure.  Depending on
 * the HV/HS/DS/DL flag combination the frame may live in a small buffer,
 * a large page chunk, or a chain of large chunks described by an sglist.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	struct qlge_bq_desc *lbq_desc, *sbq_desc;
	struct sk_buff *skb = NULL;
	size_t hlen = ETH_HLEN;
	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		/* Ownership of the skb moves to us; clear the ring slot. */
		sbq_desc->p.skb = NULL;
	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 */
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			dma_sync_single_for_cpu(&qdev->pdev->dev,
			skb_put_data(skb, sbq_desc->p.skb->data, length);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
			sbq_desc->p.skb = NULL;
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, length);
			skb->data_len += length;
			skb->truesize += length;
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
			skb->data_len += length;
			skb->truesize += length;
			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
					      lbq_desc->p.pg_chunk.va,
			/* Pull the MAC (+vlan) header into the linear area. */
			__pskb_pull_tail(skb, hlen);
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * thru and chain them to the our small header
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is an non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			/* Each chunk carries at most one lbq buffer's worth. */
			size = min(length, qdev->lbq_buf_size);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, size);
			skb->data_len += size;
			skb->truesize += size;
		} while (length > 0);
		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
		__pskb_pull_tail(skb, hlen);
/* Process an inbound completion from an rx ring. */
/* Header/data-split (and fallback multi-buffer) path: delegate skb
 * assembly to ql_build_rx_skb(), then validate, account, and hand the
 * frame to the stack.
 */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				/* NOTE(review): message says TCP but this is
				 * the UDP branch — misleading debug text.
				 */
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "TCP checksum done!\n");
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* vlan_id == 0xffff is the caller's "no vlan tag" sentinel. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
		netif_receive_skb(skb);
/* Process an inbound completion from an rx ring. */
/* Top-level RX demux: inspect the IOCB flag bits and dispatch to the
 * appropriate handler (split-header, small-buffer copy, GRO page, or
 * page path).  Returns the frame's data length.
 */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	/* 0xffff is the "no vlan tag" sentinel consumed by the handlers. */
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
	return (unsigned long)length;
/* Process an outbound completion from an rx ring. */
/* TX completion: unmap the DMA segments of the finished send, account
 * bytes/packets, free the skb, log any error flags the chip reported,
 * and return the descriptor to the ring's free count.
 */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	QL_DUMP_OB_MAC_RSP(mac_rsp);
	/* txq_idx/tid were stashed in the request IOCB to recover context. */
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;
	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
	/* One more free slot available for qlge_send(). */
	atomic_inc(&tx_ring->tx_count);
/* Fire up a handler to reset the MPI processor. */
/* Defers the actual reset to the mpi_reset_work item on the driver's
 * workqueue (delay 0 = run as soon as possible).
 */
void ql_queue_fw_error(struct ql_adapter *qdev)
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
/* Kick off fatal-error recovery: mask interrupts, flag the adapter as
 * down/in-recovery, and schedule the asic_reset_work handler.
 */
void ql_queue_asic_error(struct ql_adapter *qdev)
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
/* Handle an asynchronous event IOCB from the chip: log the event and
 * queue firmware (MPI) or ASIC recovery as appropriate.
 */
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		/* MPI (firmware) fault: reset the MPI processor only. */
		ql_queue_fw_error(qdev);
	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
		ql_queue_asic_error(qdev);
		/* Unknown events are treated as fatal ASIC errors. */
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
		ql_queue_asic_error(qdev);
/* Drain TX-completion entries from this completion (rx) ring, then wake
 * the matching TX subqueue if it was stopped and is now at least a
 * quarter empty.  Returns the number of entries processed.
 */
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
	struct ql_adapter *qdev = rx_ring->qdev;
	/* prod comes from a DMA'd shadow register updated by the chip. */
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
		/* Advance the consumer index past this entry. */
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	/* Tell the chip how far we've consumed. */
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
/* Drain inbound completions from this ring, up to @budget frames, then
 * refill the buffer queues and publish the new consumer index.  Returns
 * the number of frames processed (NAPI work done).
 */
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
	struct ql_adapter *qdev = rx_ring->qdev;
	/* prod comes from a DMA'd shadow register updated by the chip. */
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
		net_rsp = rx_ring->curr_entry;
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
		case OPCODE_IB_AE_IOCB:
			/* Asynchronous (error) event from the chip. */
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		/* Honor the NAPI budget. */
		if (count == budget)
	/* Replenish sbq/lbq buffers consumed by the frames above. */
	ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
	ql_write_cq_idx(rx_ring);
/* NAPI poll routine for MSI-X vectors: first service any TX completion
 * rings bound to this vector, then the RSS (RX) ring itself.  Re-enables
 * the vector's interrupt when less than the full budget was used.
 */
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	/* Budget not exhausted: we are done polling for now. */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2235 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2237 struct ql_adapter *qdev = netdev_priv(ndev);
2239 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2240 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2241 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2243 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 *
 * If the interface is running, the adapter is bounced (down then up) so
 * the new vlan acceleration setting takes effect in hardware.
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
	struct ql_adapter *qdev = netdev_priv(ndev);
	bool need_restart = netif_running(ndev);
	status = ql_adapter_down(qdev);
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring down the adapter\n");
	/* update the features with resent change */
	ndev->features = features;
	status = ql_adapter_up(qdev);
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring up the adapter\n");
2282 static int qlge_set_features(struct net_device *ndev,
2283 netdev_features_t features)
2285 netdev_features_t changed = ndev->features ^ features;
2288 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2289 /* Update the behavior of vlan accel in the adapter */
2290 err = qlge_update_hw_vlan_features(ndev, features);
2294 qlge_vlan_mode(ndev, features);
2300 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2302 u32 enable_bit = MAC_ADDR_E;
2305 err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2306 MAC_ADDR_TYPE_VLAN, vid);
2308 netif_err(qdev, ifup, qdev->ndev,
2309 "Failed to init vlan address.\n");
2313 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2315 struct ql_adapter *qdev = netdev_priv(ndev);
2319 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2323 err = __qlge_vlan_rx_add_vid(qdev, vid);
2324 set_bit(vid, qdev->active_vlans);
2326 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2331 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2336 err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2337 MAC_ADDR_TYPE_VLAN, vid);
2339 netif_err(qdev, ifup, qdev->ndev,
2340 "Failed to clear vlan address.\n");
2344 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2346 struct ql_adapter *qdev = netdev_priv(ndev);
2350 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2354 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2355 clear_bit(vid, qdev->active_vlans);
2357 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2362 static void qlge_restore_vlan(struct ql_adapter *qdev)
2367 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2371 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2372 __qlge_vlan_rx_add_vid(qdev, vid);
2374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2377 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2378 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2380 struct rx_ring *rx_ring = dev_id;
2382 napi_schedule(&rx_ring->napi);
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	/* Experience shows that when using INTx interrupts, interrupts must
	 * be masked manually.
	 * When using MSI mode, INTR_EN_EN must be explicitly disabled
	 * (even though it is auto-masked), otherwise a later command to
	 * enable it is not effective.
	 */
	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
		ql_disable_completion_interrupt(qdev, 0);
	var = ql_read32(qdev, STS);
	/*
	 * Check for fatal error.
	 */
		ql_disable_completion_interrupt(qdev, 0);
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		/* Mask further MPI interrupts until the worker has run. */
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		napi_schedule(&rx_ring->napi);
	/* Experience shows that the device sometimes signals an
	 * interrupt but no work is scheduled from this function.
	 * Nevertheless, the interrupt is auto-masked. Therefore, we
	 * systematically re-enable the interrupt if we didn't
	 */
		ql_enable_completion_interrupt(qdev, 0);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
/* Populate a TSO transmit IOCB for a GSO skb: header lengths, offsets,
 * MSS, and a pseudo-header checksum seed for the hardware to complete.
 * Only applies when skb_is_gso(); otherwise the IOCB is left untouched.
 */
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
	if (skb_is_gso(skb)) {
		int err;
		__be16 l3_proto = vlan_get_protocol(skb);
		/* Header must be writable before we edit the TCP checksum. */
		err = skb_cow_head(skb, 0);
		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		/* L3 offset in the low bits, L4 offset shifted above it. */
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(l3_proto == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			/* Seed the pseudo-header checksum; HW finishes it. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
/* Set up a transmit IOCB for hardware TCP/UDP checksum offload on an
 * IPv4 frame: record header offsets/lengths and seed the L4 checksum
 * field with the pseudo-header sum for the chip to complete.
 */
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
	struct iphdr *iph = ip_hdr(skb);
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
	/* L3 offset in the low bits, L4 offset shifted above it. */
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	/* L4 payload length = IP total length minus the IP header. */
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	/* Seed the pseudo-header checksum; hardware finishes the rest. */
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
/* ndo_start_xmit handler: build an outbound MAC IOCB for the skb, map
 * its fragments, and ring the TX producer doorbell.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY when the ring lacks space.
 * NOTE(review): several lines (locals such as "tso", some closing braces
 * and comparison tails) are elided in this extract.
 */
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32)skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	/* Pad runts to the Ethernet minimum. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Should not happen: the queue is stopped before it fills. */
	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);

	if (skb_vlan_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	/* Try TSO first; fall back to plain HW checksum offload. */
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	/* Advance the producer index with wrap-around. */
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;

	/* Tell the hardware about the new producer index. */
	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	return NETDEV_TX_OK;
/* Release the rx/tx shadow-register coherent DMA areas allocated by
 * ql_alloc_shadow_space().  Safe to call when nothing is allocated.
 * NOTE(review): the size argument lines of dma_free_coherent() are
 * elided in this extract.
 */
static void ql_free_shadow_space(struct ql_adapter *qdev)
	if (qdev->rx_ring_shadow_reg_area) {
		dma_free_coherent(&qdev->pdev->dev,
				  qdev->rx_ring_shadow_reg_area,
				  qdev->rx_ring_shadow_reg_dma);
		/* NULL the pointer so a repeat call is a no-op. */
		qdev->rx_ring_shadow_reg_area = NULL;
	if (qdev->tx_ring_shadow_reg_area) {
		dma_free_coherent(&qdev->pdev->dev,
				  qdev->tx_ring_shadow_reg_area,
				  qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
/* Allocate one DMA-coherent page each for the rx and tx shadow register
 * areas.  On tx failure the rx area is rolled back (err_wqp_sh_area
 * label, elided here).  Returns 0 on success; error returns are elided
 * in this extract.
 */
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
	qdev->rx_ring_shadow_reg_area =
		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
				   &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
	if (!qdev->rx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
	qdev->tx_ring_shadow_reg_area =
		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
				   &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
	if (!qdev->tx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	/* Error unwind: free the already-allocated RX shadow area. */
	dma_free_coherent(&qdev->pdev->dev,
			  qdev->rx_ring_shadow_reg_area,
			  qdev->rx_ring_shadow_reg_dma);
/* Initialize the software descriptor array for a TX work queue: link
 * each tx_ring_desc to its hardware IOCB slot and mark the whole ring
 * as free.  NOTE(review): loop-interior increments of mac_iocb_ptr and
 * tx_ring_desc are elided in this extract.
 */
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
	/* All slots are available after init. */
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
/* Free one TX ring: the DMA-coherent work queue and (in the elided
 * tail) the kmalloc'ed descriptor array.
 */
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
	if (tx_ring->wq_base) {
		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
				  tx_ring->wq_base, tx_ring->wq_base_dma);
		/* Prevent double free on a later teardown pass. */
		tx_ring->wq_base = NULL;
/* Allocate one TX ring: a DMA-coherent work queue (which must satisfy
 * the hardware WQ_ADDR_ALIGN constraint) plus the software descriptor
 * array.  On descriptor-array failure the work queue is unwound.
 * NOTE(review): assignment targets, goto labels and return statements
 * are elided in this extract.
 */
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
	dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
			   &tx_ring->wq_base_dma, GFP_ATOMIC);

	/* Reject both allocation failure and misaligned DMA addresses. */
	if (!tx_ring->wq_base ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
	kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
	/* Error unwind: release the coherent work queue. */
	dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
			  tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
/* Drop every outstanding large-buffer (page chunk) on an rx_ring.  A
 * block is only DMA-unmapped when its last chunk (at last_offset) is
 * reached; every chunk drops its page reference.  The partially-used
 * master chunk is released at the end.
 */
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
	struct qlge_bq *lbq = &rx_ring->lbq;
	unsigned int last_offset;

	/* Offset of the final chunk within a mapped block. */
	last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
	while (lbq->next_to_clean != lbq->next_to_use) {
		struct qlge_bq_desc *lbq_desc =
			&lbq->queue[lbq->next_to_clean];

		/* Only the block's last chunk owns the DMA mapping. */
		if (lbq_desc->p.pg_chunk.offset == last_offset)
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       ql_lbq_block_size(qdev),
		put_page(lbq_desc->p.pg_chunk.page);

		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
	if (rx_ring->master_chunk.page) {
		dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
		put_page(rx_ring->master_chunk.page);
		rx_ring->master_chunk.page = NULL;
/* Unmap and free every small-buffer skb on an rx_ring's sbq.
 * NOTE(review): the NULL-queue guard condition and the unmap size/
 * direction arguments are elided in this extract.
 */
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
	for (i = 0; i < QLGE_BQ_LEN; i++) {
		struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];

		netif_err(qdev, ifup, qdev->ndev,
			  "sbq_desc %d is NULL.\n", i);
		if (sbq_desc->p.skb) {
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
			dev_kfree_skb(sbq_desc->p.skb);
			/* Clear so teardown is idempotent. */
			sbq_desc->p.skb = NULL;
/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
	for (i = 0; i < qdev->rx_ring_count; i++) {
		struct rx_ring *rx_ring = &qdev->rx_ring[i];

		/* Only rings whose buffer queues were set up need freeing. */
		if (rx_ring->lbq.queue)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq.queue)
			ql_free_sbq_buffers(qdev, rx_ring);
/* Prime the small/large buffer queues of every RSS ring (may sleep;
 * GFP_KERNEL).  NOTE(review): the final argument of
 * ql_update_buffer_queues() is elided in this extract.
 */
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
	for (i = 0; i < qdev->rss_ring_count; i++)
		ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
/* Allocate one buffer queue (sbq or lbq): the DMA-coherent ring the
 * hardware reads plus the parallel software descriptor array, then link
 * each descriptor to its ring slot.
 * NOTE(review): locals (buf_ptr, i), NULL checks, descriptor index
 * assignment and return statements are elided in this extract.
 */
static int qlge_init_bq(struct qlge_bq *bq)
	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct qlge_bq_desc *bq_desc;

	bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				      &bq->base_dma, GFP_ATOMIC);
	netif_err(qdev, ifup, qdev->ndev,
		  "ring %u %s allocation failed.\n", rx_ring->cq_id,
		  bq_type_name[bq->type]);
	bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
	/* Tie each software descriptor to its hardware ring entry. */
	bq_desc = &bq->queue[0];
	for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
		bq_desc->p.skb = NULL;
		bq_desc->buf_ptr = buf_ptr;
/* Tear down one rx_ring: small/large buffer queues (DMA rings plus
 * control-block arrays) and the completion queue itself.  Each pointer
 * is NULLed so repeated calls are harmless; kfree(NULL) is a no-op.
 */
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
	/* Free the small buffer queue. */
	if (rx_ring->sbq.base) {
		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				  rx_ring->sbq.base, rx_ring->sbq.base_dma);
		rx_ring->sbq.base = NULL;
	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq.queue);
	rx_ring->sbq.queue = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq.base) {
		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				  rx_ring->lbq.base, rx_ring->lbq.base_dma);
		rx_ring->lbq.base = NULL;
	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq.queue);
	rx_ring->lbq.queue = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		dma_free_coherent(&qdev->pdev->dev,
				  rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure.
 * Buffer queues are only needed for RSS (inbound) rings, i.e.
 * cq_id < rss_ring_count; TX-completion rings get only the CQ.
 * NOTE(review): assignment target of the CQ allocation and return
 * statements are elided in this extract.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
	 * Allocate the completion queue for this rx_ring.
	dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
			   &rx_ring->cq_base_dma, GFP_ATOMIC);

	if (!rx_ring->cq_base) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
	/* Buffer queues only for inbound (RSS) completion rings. */
	if (rx_ring->cq_id < qdev->rss_ring_count &&
	    (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
		ql_free_rx_resources(qdev, rx_ring);
/* Reclaim skbs left in the TX rings at ifdown time: unmap their DMA
 * segments and free them, logging each one since a lost skb indicates
 * completions never arrived for it.
 */
static void ql_tx_ring_clean(struct ql_adapter *qdev)
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	 * Loop through all queues and free
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				/* Unmap before freeing the skb's pages. */
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
/* Release every TX ring, every RX ring and the shadow-register areas —
 * the inverse of ql_alloc_mem_resources().
 */
static void ql_free_mem_resources(struct ql_adapter *qdev)
	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
/* Allocate all adapter memory: shadow registers, then every RX ring,
 * then every TX ring.  Any failure unwinds everything allocated so far
 * via ql_free_mem_resources().
 * NOTE(review): goto statements and the error label are elided in this
 * extract.
 */
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
	/* Error unwind: free everything allocated above. */
	ql_free_mem_resources(qdev);
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 *
 * Carves per-ring shadow register space, assigns the four doorbell
 * registers, fills the CQICB (lbq/sbq indirection tables for RSS
 * rings), registers NAPI for inbound rings, and downloads the CQICB
 * with ql_write_cfg().
 * NOTE(review): some locals, loop headers ("do {"/increments) and a few
 * assignment lines are elided in this extract.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	/* RX doorbell pages start 128 pages into the doorbell area. */
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	__le64 *base_indirect_ptr;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq.base_indirect = shadow_reg;
	rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
	rx_ring->sbq.base_indirect = shadow_reg;
	rx_ring->sbq.base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	 * Set up the control block load flags.
	cqicb->flags = FLAGS_LC | /* Load queue base address */
		FLAGS_LV |	  /* Load MSI-X vector */
		FLAGS_LI;	  /* Load irq delay values */
	if (rx_ring->cq_id < qdev->rss_ring_count) {
		cqicb->flags |= FLAGS_LL; /* Load lbq values */
		/* Fill the lbq indirection table, one DB page per entry. */
		tmp = (u64)rx_ring->lbq.base_dma;
		base_indirect_ptr = rx_ring->lbq.base_indirect;
		*base_indirect_ptr = cpu_to_le64(tmp);
		tmp += DB_PAGE_SIZE;
		base_indirect_ptr++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
		cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
		cqicb->lbq_buf_size =
			cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
		cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
		rx_ring->lbq.next_to_use = 0;
		rx_ring->lbq.next_to_clean = 0;

		cqicb->flags |= FLAGS_LS; /* Load sbq values */
		/* Same indirection setup for the small buffer queue. */
		tmp = (u64)rx_ring->sbq.base_dma;
		base_indirect_ptr = rx_ring->sbq.base_indirect;
		*base_indirect_ptr = cpu_to_le64(tmp);
		tmp += DB_PAGE_SIZE;
		base_indirect_ptr++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
			cpu_to_le64(rx_ring->sbq.base_indirect_dma);
		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
		cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
		rx_ring->sbq.next_to_use = 0;
		rx_ring->sbq.next_to_clean = 0;
	if (rx_ring->cq_id < qdev->rss_ring_count) {
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
	/* Download the control block to the chip. */
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
/* Set up one TX work queue: assign its doorbell and shadow registers,
 * fill the WQICB (Work Queue Initialization Control Block), init the
 * software ring, and download the WQICB to the chip.
 * NOTE(review): some locals and the return paths are elided in this
 * extract.
 */
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
		(tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
		(tx_ring->wq_id * sizeof(u64));

	 * Assign doorbell registers for this tx_ring.
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	 * Assign shadow registers for this tx_ring.
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	/* Reset software ring state before the HW starts using it. */
	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16)tx_ring->wq_id);
	netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
/* Undo whichever interrupt mode was enabled: MSI-X (and its entry
 * array) or MSI.  Legacy INTx needs no teardown here.
 */
static void ql_disable_msix(struct ql_adapter *qdev)
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 *
 * Fallback order: MSI-X -> MSI -> legacy INTx; qlge_irq_type is
 * downgraded in place so later code sees the mode actually in use.
 * NOTE(review): some locals, error-branch keywords and return/goto
 * statements are elided in this extract.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Accept anywhere from 1 to intr_count vectors. */
		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
					    1, qdev->intr_count);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
		netif_warn(qdev, ifup, qdev->ndev,
			   "MSI-X Enable failed, trying MSI.\n");
		qlge_irq_type = MSI_IRQ;
		/* Success path: remember how many vectors we really got. */
		qdev->intr_count = err;
		set_bit(QL_MSIX_ENABLED, &qdev->flags);
		netif_info(qdev, ifup, qdev->ndev,
			   "MSI-X Enabled, got %d vectors.\n",
	/* MSI / legacy always run single-vector. */
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
	qlge_irq_type = LEG_IRQ;
	set_bit(QL_LEGACY_ENABLED, &qdev->flags);
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		/* TX completion rings follow the RSS rings in rx_ring[]. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			/* Move to the next vector after filling this one. */
			if (j == tx_rings_per_vector) {
			qdev->rx_ring[i].irq = vect;
		/* For single vector all rings have an irq
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		for (j = 0; j < tx_rings_per_vector; j++) {
			(1 << qdev->rx_ring[qdev->rss_ring_count +
			 (vect * tx_rings_per_vector) + j].cq_id);
		/* For single vector we just shift each queue's
		/* Single vector services every ring. */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 *
 * MSI-X: one intr_context per vector (vector 0 additionally handles
 * broadcast/multicast, errors and firmware events).  MSI/INTx: a single
 * intr_context shared by all rings.
 * NOTE(review): some continuation lines of the mask expressions and
 * branch keywords are elided in this extract.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has it's
		 * own intr_context since we have separate
		 * vectors for each queue.
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			ql_set_irq_mask(qdev, intr_context);
			 * We set up each vectors enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			intr_context->intr_en_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			intr_context->intr_dis_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			intr_context->intr_read_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			/* The first vector/queue handles
			 * broadcast/multicast, fatal errors,
			 * and firmware events. This in addition
			 * to normal inbound NAPI processing.
			intr_context->handler = qlge_isr;
			sprintf(intr_context->name, "%s-rx-%d",
				qdev->ndev->name, i);
			 * Inbound queues handle unicast frames only.
			intr_context->handler = qlge_msix_rx_isr;
			sprintf(intr_context->name, "%s-rx-%d",
				qdev->ndev->name, i);
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		 * We set up each vectors enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		intr_context->intr_en_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			INTR_EN_TYPE_DISABLE;
		if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
			/* Experience shows that when using INTx interrupts,
			 * the device does not always auto-mask INTR_EN_EN.
			 * Moreover, masking INTR_EN_EN manually does not
			 * immediately prevent interrupt generation.
			intr_context->intr_en_mask |= INTR_EN_EI << 16 |
			intr_context->intr_dis_mask |= INTR_EN_EI << 16;
		intr_context->intr_read_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		 * Single interrupt means one handler for all rings.
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		ql_set_irq_mask(qdev, intr_context);
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	ql_set_tx_vect(qdev);
/* Release every hooked IRQ (per-vector for MSI-X, the shared PCI irq
 * otherwise) and then disable MSI-X/MSI.
 */
static void ql_free_irq(struct ql_adapter *qdev)
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		/* Only free vectors that request_irq() actually hooked. */
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
			free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
	ql_disable_msix(qdev);
/* Hook interrupt handlers: one request_irq() per MSI-X vector, or a
 * single (possibly shared, for INTx) handler on pdev->irq otherwise.
 * On failure all previously hooked vectors are released.
 * NOTE(review): some request_irq() argument lines, the goto/err paths
 * and return statements are elided in this extract.
 */
static int ql_request_irq(struct ql_adapter *qdev)
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	/* Decide handlers, names and masks before hooking anything. */
	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed request for MSIX interrupt %d.\n",
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
			/* Single-vector path: shared flag only for INTx. */
			request_irq(pdev->irq, qlge_isr,
				    test_bit(QL_MSI_ENABLED, &qdev->flags)
				    intr_context->name, &qdev->rx_ring[0]);
			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr 0, queue type RX_Q, with name %s.\n",
				  intr_context->name);
		/* Mark hooked so ql_free_irq() knows to release it. */
		intr_context->hooked = 1;
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
/* Download the RSS Initialization Control Block (RICB): a fixed hash
 * seed, a 1024-entry indirection table spreading flows across the RSS
 * rings, and TCP/IPv4+IPv6 hashing enabled.
 * NOTE(review): some flag-assignment and return lines are elided in
 * this extract; rss_ring_count is assumed to be a power of two for the
 * "& (count - 1)" masking — confirm against ring setup.
 */
static int ql_start_rss(struct ql_adapter *qdev)
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	struct ricb *ricb = &qdev->ricb;
	u8 *hash_id = (u8 *)ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	/* 10-bit hash -> 1024 indirection entries. */
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	 * Fill out the Indirection Table.
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
/* Zero all 16 frame-routing registers under the RT_IDX hardware
 * semaphore.  NOTE(review): the early-return on semaphore failure and
 * the final return are elided in this extract.
 */
static int ql_clear_routing_entries(struct ql_adapter *qdev)
	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
	/* Always drop the semaphore, even on error. */
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
/* Initialize the frame-to-queue routing. */
/* Clears the table, then (under the RT_IDX semaphore) programs slots
 * for IP/TCP-UDP checksum errors, broadcast, RSS match (multi-ring
 * only) and finally the CAM-hit slot.
 * NOTE(review): error-branch goto/return lines are elided in this
 * extract.
 */
static int ql_route_initialize(struct ql_adapter *qdev)
	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	netif_err(qdev, ifup, qdev->ndev,
		  "Failed to init routing register for IP CSUM error packets.\n");
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	netif_err(qdev, ifup, qdev->ndev,
		  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	netif_err(qdev, ifup, qdev->ndev,
		  "Failed to init routing register for broadcast packets.\n");
	/* If we have more than one inbound queue, then turn on RSS in the
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for MATCH RSS packets.\n");
	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
	netif_err(qdev, ifup, qdev->ndev,
		  "Failed to init routing register for CAM packets.\n");
	/* Release the semaphore taken above. */
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
/* Program the MAC address into the CAM (set when the link is up, clear
 * otherwise) and then initialize the routing table.  Non-static: also
 * called from outside this file.
 * NOTE(review): error-return lines are elided in this extract.
 */
int ql_cam_route_initialize(struct ql_adapter *qdev)
	/* If check if the link is up and use to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
	status = ql_route_initialize(qdev);
	netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
/* Bring the chip to an operational state: global registers (error
 * halt, default queue/VLAN, MPI interrupt, function config, header
 * split, routing of received frames), then start every rx ring, RSS
 * (if multi-ring), every tx ring, the port, CAM/routing, and finally
 * enable NAPI on the RSS rings.
 * NOTE(review): some locals, value assignments and error-return lines
 * are elided in this extract.
 */
static int ql_adapter_initialize(struct ql_adapter *qdev)
	 * Set up the System register to halt on errors.
	value = SYS_EFE | SYS_FAE;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		value |= NIC_RCV_CFG_RV;
		mask |= (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
		FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enable on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to start rx ring[%d].\n", i);
	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to start tx ring[%d].\n", i);
	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	netif_err(qdev, ifup, qdev->ndev,
		  "Failed to init CAM/Routing tables.\n");
	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);
/* Issue soft reset to chip. */
/* Clears routing, quiesces management traffic and FIFOs (unless we are
 * in ASIC-recovery, where that step is skipped), asserts function
 * reset, and polls RST_FO until the reset bit clears or the deadline
 * passes.
 * NOTE(review): the do-loop header, a delay inside the poll loop and
 * the final return are elided in this extract.
 */
static int ql_adapter_reset(struct ql_adapter *qdev)
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	/* Assert function reset (self-clearing on completion). */
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	end_jiffies = jiffies + usecs_to_jiffies(30);
	value = ql_read32(qdev, RST_FO);
	if ((value & RST_FO_FR) == 0)
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
/* Log identifying information for the adapter at probe time:
 * function/port numbers, the nibble-packed chip revision fields
 * decoded from chip_rev_id, and the MAC address.
 */
3737 static void ql_display_dev_info(struct net_device *ndev)
3739 	struct ql_adapter *qdev = netdev_priv(ndev);
3741 	netif_info(qdev, probe, qdev->ndev,
3742 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3743 		   "XG Roll = %d, XG Rev = %d.\n",
		   /* chip_rev_id packs four 4-bit fields: roll/rev for NIC and XG. */
3746 		   qdev->chip_rev_id & 0x0000000f,
3747 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3748 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3749 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3750 	netif_info(qdev, probe, qdev->ndev,
3751 		   "MAC address %pM\n", ndev->dev_addr);
/* Program Wake-on-LAN state into the MPI firmware via mailbox
 * commands.  Only WAKE_MAGIC is supported: any other requested WOL
 * mode (ARP/magic-secure/phy/ucast/mcast/bcast) is rejected with an
 * error log.  Finally sets the overall WOL mode word.
 */
3754 static int ql_wol(struct ql_adapter *qdev)
3757 	u32 wol = MB_WOL_DISABLE;
3759 	/* The CAM is still intact after a reset, but if we
3760 	 * are doing WOL, then we may need to program the
3761 	 * routing regs. We would also need to issue the mailbox
3762 	 * commands to instruct the MPI what to do per the ethtool
	/* Reject every WOL mode this hardware/firmware combo can't do. */
3766 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3767 			 WAKE_MCAST | WAKE_BCAST)) {
3768 		netif_err(qdev, ifdown, qdev->ndev,
3769 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3774 	if (qdev->wol & WAKE_MAGIC) {
3775 		status = ql_mb_wol_set_magic(qdev, 1);
3777 			netif_err(qdev, ifdown, qdev->ndev,
3778 				  "Failed to set magic packet on %s.\n",
3782 		netif_info(qdev, drv, qdev->ndev,
3783 			   "Enabled magic packet successfully on %s.\n",
3786 		wol |= MB_WOL_MAGIC_PKT;
3790 		wol |= MB_WOL_MODE_ON;
3791 		status = ql_mb_wol_mode(qdev, wol);
3792 		netif_err(qdev, drv, qdev->ndev,
3793 			  "WOL %s (wol code 0x%x) on %s\n",
3794 			  (status == 0) ? "Successfully set" : "Failed",
3795 			  wol, qdev->ndev->name);
/* Synchronously cancel all of the driver's delayed work items.
 * The ASIC reset worker is only cancelled when the adapter is UP;
 * if a recovery is already in flight (adapter down) that worker is
 * left alone so recovery can complete.
 */
3801 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3804 	/* Don't kill the reset worker thread if we
3805 	 * are in the process of recovery.
3807 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3808 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3809 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3810 	cancel_delayed_work_sync(&qdev->mpi_work);
3811 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3812 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3813 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
/* Bring the adapter down: cancel pending work, disable NAPI on every
 * RSS ring, clear the UP flag, mask interrupts, drain the TX rings,
 * delete the NAPI contexts, soft-reset the chip, and free RX buffers.
 * Returns the status of the chip reset.
 */
3816 static int ql_adapter_down(struct ql_adapter *qdev)
3822 	ql_cancel_all_work_sync(qdev);
3824 	for (i = 0; i < qdev->rss_ring_count; i++)
3825 		napi_disable(&qdev->rx_ring[i].napi);
3827 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3829 	ql_disable_interrupts(qdev);
3831 	ql_tx_ring_clean(qdev);
3833 	/* Call netif_napi_del() from common point.
3835 	for (i = 0; i < qdev->rss_ring_count; i++)
3836 		netif_napi_del(&qdev->rx_ring[i].napi);
3838 	status = ql_adapter_reset(qdev);
3840 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3842 	ql_free_rx_buffers(qdev);
/* Bring the adapter up: initialize the hardware, mark UP, post RX
 * buffers, restore carrier if port init/link bits are already set in
 * STS, re-apply RX mode and VLAN filters, enable interrupts, and
 * start all TX queues.  On failure the chip is reset on the way out.
 */
3847 static int ql_adapter_up(struct ql_adapter *qdev)
3851 	err = ql_adapter_initialize(qdev);
3853 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3856 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3857 	ql_alloc_rx_buffers(qdev);
3858 	/* If the port is initialized and the
3859 	 * link is up the turn on the carrier.
3861 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
3862 	    (ql_read32(qdev, STS) & qdev->port_link_up))
3864 	/* Restore rx mode. */
	/* Clear cached filter state so set_multicast_list reprograms it. */
3865 	clear_bit(QL_ALLMULTI, &qdev->flags);
3866 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
3867 	qlge_set_multicast_list(qdev->ndev);
3869 	/* Restore vlan setting. */
3870 	qlge_restore_vlan(qdev);
3872 	ql_enable_interrupts(qdev);
3873 	ql_enable_all_completion_interrupts(qdev);
3874 	netif_tx_start_all_queues(qdev->ndev);
	/* Error path: undo partial bring-up with a chip reset. */
3878 	ql_adapter_reset(qdev);
/* Release adapter resources allocated by ql_get_adapter_resources()
 * (memory resources; IRQ release elided in this extract).
 */
3882 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3884 	ql_free_mem_resources(qdev);
/* Allocate adapter memory resources and request IRQs.
 * Returns 0 on success, or the error from allocation/IRQ request.
 */
3888 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3890 	if (ql_alloc_mem_resources(qdev)) {
3891 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3894 	return ql_request_irq(qdev);
/* ndo_stop handler: close the interface.
 * Skips the teardown if an EEH permanent failure already unloaded the
 * adapter, waits for any in-flight reset to finish (QL_ADAPTER_UP),
 * cancels per-ring refill work so it cannot re-enable NAPI, then
 * brings the adapter down and releases its resources.
 */
3897 static int qlge_close(struct net_device *ndev)
3899 	struct ql_adapter *qdev = netdev_priv(ndev);
3902 	/* If we hit pci_channel_io_perm_failure
3903 	 * failure condition, then we already
3904 	 * brought the adapter down.
3906 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3907 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3908 		clear_bit(QL_EEH_FATAL, &qdev->flags);
3913 	 * Wait for device to recover from a reset.
3914 	 * (Rarely happens, but possible.)
3916 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3919 	/* Make sure refill_work doesn't re-enable napi */
3920 	for (i = 0; i < qdev->rss_ring_count; i++)
3921 		cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3923 	ql_adapter_down(qdev);
3924 	ql_release_adapter_resources(qdev);
/* Select the large-buffer size for RX based on MTU: the minimum size
 * for standard (<=1500) frames, the maximum for jumbo frames; also
 * caches the corresponding page allocation order.
 */
3928 static void qlge_set_lb_size(struct ql_adapter *qdev)
3930 	if (qdev->ndev->mtu <= 1500)
3931 		qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3933 		qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3934 	qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
/* Size and initialize the software ring structures.
 * Requests one MSI-X vector per online CPU (capped at MAX_CPUS); the
 * granted vector count becomes the RSS (inbound) ring count.  TX ring
 * count is cpu_cnt, and the total RX ring count is RSS rings plus one
 * outbound-completion ring per TX ring.  Each tx_ring/rx_ring struct
 * is zeroed and populated with lengths, sizes, and completion-queue
 * IDs; RSS rings also get their refill work item initialized.
 */
3937 static int ql_configure_rings(struct ql_adapter *qdev)
3940 	struct rx_ring *rx_ring;
3941 	struct tx_ring *tx_ring;
3942 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3944 	/* In a perfect world we have one RSS ring for each CPU
3945 	 * and each has it's own vector. To do that we ask for
3946 	 * cpu_cnt vectors. ql_enable_msix() will adjust the
3947 	 * vector count to what we actually get. We then
3948 	 * allocate an RSS ring for each.
3949 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
3951 	qdev->intr_count = cpu_cnt;
3952 	ql_enable_msix(qdev);
3953 	/* Adjust the RSS ring count to the actual vector count. */
3954 	qdev->rss_ring_count = qdev->intr_count;
3955 	qdev->tx_ring_count = cpu_cnt;
3956 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3958 	for (i = 0; i < qdev->tx_ring_count; i++) {
3959 		tx_ring = &qdev->tx_ring[i];
3960 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
3961 		tx_ring->qdev = qdev;
3963 		tx_ring->wq_len = qdev->tx_ring_size;
3965 			tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3968 		 * The completion queue ID for the tx rings start
3969 		 * immediately after the rss rings.
3971 		tx_ring->cq_id = qdev->rss_ring_count + i;
3974 	for (i = 0; i < qdev->rx_ring_count; i++) {
3975 		rx_ring = &qdev->rx_ring[i];
3976 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
3977 		rx_ring->qdev = qdev;
3979 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		/* First rss_ring_count rings are inbound; the rest are
		 * outbound completion rings paired with TX rings.
		 */
3980 		if (i < qdev->rss_ring_count) {
3982 			 * Inbound (RSS) queues.
3984 			rx_ring->cq_len = qdev->rx_ring_size;
3986 				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3987 			rx_ring->lbq.type = QLGE_LB;
3988 			rx_ring->sbq.type = QLGE_SB;
3989 			INIT_DELAYED_WORK(&rx_ring->refill_work,
3993 			 * Outbound queue handles outbound completions only.
3995 			/* outbound cq is same size as tx_ring it services. */
3996 			rx_ring->cq_len = qdev->tx_ring_size;
3998 				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
/* ndo_open handler: reset the chip, pick RX buffer sizes for the
 * current MTU, configure rings, allocate resources, and bring the
 * adapter up.  On failure, resources are released on the error path.
 */
4004 static int qlge_open(struct net_device *ndev)
4007 	struct ql_adapter *qdev = netdev_priv(ndev);
4009 	err = ql_adapter_reset(qdev);
4013 	qlge_set_lb_size(qdev);
4014 	err = ql_configure_rings(qdev);
4018 	err = ql_get_adapter_resources(qdev);
4022 	err = ql_adapter_up(qdev);
	/* Error path: undo resource allocation. */
4029 	ql_release_adapter_resources(qdev);
/* Re-size RX buffers after an MTU change by cycling the adapter
 * down and back up.  First waits (with retries) for any outstanding
 * reset to finish; if the up/down cycle fails, the device is closed.
 */
4033 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4037 	/* Wait for an outstanding reset to complete. */
4038 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4041 		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4042 			netif_err(qdev, ifup, qdev->ndev,
4043 				  "Waiting for adapter UP...\n");
4048 			netif_err(qdev, ifup, qdev->ndev,
4049 				  "Timed out waiting for adapter UP\n");
4054 	status = ql_adapter_down(qdev);
4058 	qlge_set_lb_size(qdev);
4060 	status = ql_adapter_up(qdev);
	/* Fatal error path: mark UP so close proceeds, then shut down. */
4066 	netif_alert(qdev, ifup, qdev->ndev,
4067 		    "Driver up/down cycle failed, closing device.\n");
4068 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4069 	dev_close(qdev->ndev);
/* ndo_change_mtu handler.  Only the 1500<->9000 transitions are
 * logged here (range filtering is done via min_mtu/max_mtu set at
 * probe time).  Schedules port-config work, records the new MTU, and
 * if the interface is running, re-sizes the RX buffers.
 */
4073 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4075 	struct ql_adapter *qdev = netdev_priv(ndev);
4078 	if (ndev->mtu == 1500 && new_mtu == 9000)
4079 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4080 	else if (ndev->mtu == 9000 && new_mtu == 1500)
4081 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4085 	queue_delayed_work(qdev->workqueue,
4086 			   &qdev->mpi_port_cfg_work, 3 * HZ);
4088 	ndev->mtu = new_mtu;
4090 	if (!netif_running(qdev->ndev))
4093 	status = ql_change_rx_buffers(qdev);
4095 		netif_err(qdev, ifup, qdev->ndev,
4096 			  "Changing MTU failed.\n");
/* ndo_get_stats handler: aggregate per-ring software counters into
 * ndev->stats.  RX counters are summed across the RSS rings, TX
 * counters across all TX rings.
 */
4102 static struct net_device_stats *qlge_get_stats(struct net_device
4105 	struct ql_adapter *qdev = netdev_priv(ndev);
4106 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4107 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4108 	unsigned long pkts, mcast, dropped, errors, bytes;
	/* Sum RX stats over the inbound (RSS) rings. */
4112 	pkts = mcast = dropped = errors = bytes = 0;
4113 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4114 		pkts += rx_ring->rx_packets;
4115 		bytes += rx_ring->rx_bytes;
4116 		dropped += rx_ring->rx_dropped;
4117 		errors += rx_ring->rx_errors;
4118 		mcast += rx_ring->rx_multicast;
4120 	ndev->stats.rx_packets = pkts;
4121 	ndev->stats.rx_bytes = bytes;
4122 	ndev->stats.rx_dropped = dropped;
4123 	ndev->stats.rx_errors = errors;
4124 	ndev->stats.multicast = mcast;
	/* Sum TX stats over every TX ring. */
4127 	pkts = errors = bytes = 0;
4128 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4129 		pkts += tx_ring->tx_packets;
4130 		bytes += tx_ring->tx_bytes;
4131 		errors += tx_ring->tx_errors;
4133 	ndev->stats.tx_packets = pkts;
4134 	ndev->stats.tx_bytes = bytes;
4135 	ndev->stats.tx_errors = errors;
4136 	return &ndev->stats;
/* ndo_set_rx_mode handler: program promiscuous, all-multicast, and
 * per-address multicast filtering into the routing/CAM registers.
 * Promiscuous and all-multi are only reprogrammed on a state
 * transition, tracked via QL_PROMISCUOUS / QL_ALLMULTI flag bits.
 * All-multi is also forced when the multicast list exceeds the CAM
 * capacity (MAX_MULTICAST_ENTRIES).  Hardware access is serialized
 * with the routing-index and MAC-address semaphores.
 */
4139 static void qlge_set_multicast_list(struct net_device *ndev)
4141 	struct ql_adapter *qdev = netdev_priv(ndev);
4142 	struct netdev_hw_addr *ha;
4145 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4149 	 * Set or clear promiscuous mode if a
4150 	 * transition is taking place.
4152 	if (ndev->flags & IFF_PROMISC) {
4153 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4154 			if (ql_set_routing_reg
4155 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4156 				netif_err(qdev, hw, qdev->ndev,
4157 					  "Failed to set promiscuous mode.\n");
4159 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4163 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4164 			if (ql_set_routing_reg
4165 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4166 				netif_err(qdev, hw, qdev->ndev,
4167 					  "Failed to clear promiscuous mode.\n");
4169 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4175 	 * Set or clear all multicast mode if a
4176 	 * transition is taking place.
4178 	if ((ndev->flags & IFF_ALLMULTI) ||
4179 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4180 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4181 			if (ql_set_routing_reg
4182 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4183 				netif_err(qdev, hw, qdev->ndev,
4184 					  "Failed to set all-multi mode.\n");
4186 				set_bit(QL_ALLMULTI, &qdev->flags);
4190 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4191 			if (ql_set_routing_reg
4192 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4193 				netif_err(qdev, hw, qdev->ndev,
4194 					  "Failed to clear all-multi mode.\n");
4196 				clear_bit(QL_ALLMULTI, &qdev->flags);
	/* Load each multicast address into the MAC address CAM, then
	 * enable multicast-match routing.
	 */
4201 	if (!netdev_mc_empty(ndev)) {
4202 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4206 		netdev_for_each_mc_addr(ha, ndev) {
4207 			if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4208 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4209 				netif_err(qdev, hw, qdev->ndev,
4210 					  "Failed to loadmulticast address.\n");
4211 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4216 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4217 		if (ql_set_routing_reg
4218 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4219 			netif_err(qdev, hw, qdev->ndev,
4220 				  "Failed to set multicast match mode.\n");
			/* Fall back to all-multi if match mode can't be set. */
4222 			set_bit(QL_ALLMULTI, &qdev->flags);
4226 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
/* ndo_set_mac_address handler: validate the new address, copy it to
 * the netdev and the driver's cached copy, and program it into the
 * CAM (slot selected by function * MAX_CQ) under the MAC semaphore.
 */
4229 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4231 	struct ql_adapter *qdev = netdev_priv(ndev);
4232 	struct sockaddr *addr = p;
4235 	if (!is_valid_ether_addr(addr->sa_data))
4236 		return -EADDRNOTAVAIL;
4237 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4238 	/* Update local copy of current mac address. */
4239 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4241 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4244 	status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4245 				     MAC_ADDR_TYPE_CAM_MAC,
4246 				     qdev->func * MAX_CQ);
4248 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4249 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
/* ndo_tx_timeout handler: treat a TX watchdog timeout as an ASIC
 * error and queue the asic-error recovery path.
 */
4253 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4255 	struct ql_adapter *qdev = netdev_priv(ndev);
4257 	ql_queue_asic_error(qdev);
/* Delayed-work handler that recovers from an ASIC error by cycling
 * the adapter down and up, then restoring the RX mode.  If the cycle
 * fails the device is forcibly closed (UP flag set first so close
 * can proceed).
 */
4260 static void ql_asic_reset_work(struct work_struct *work)
4262 	struct ql_adapter *qdev =
4263 		container_of(work, struct ql_adapter, asic_reset_work.work);
4267 	status = ql_adapter_down(qdev);
4271 	status = ql_adapter_up(qdev);
4275 	/* Restore rx mode. */
4276 	clear_bit(QL_ALLMULTI, &qdev->flags);
4277 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4278 	qlge_set_multicast_list(qdev->ndev);
	/* Error path: give up and close the device. */
4283 	netif_alert(qdev, ifup, qdev->ndev,
4284 		    "Driver up/down cycle failed, closing device\n");
4286 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4287 	dev_close(qdev->ndev);
/* Chip-specific operations for the 8012 device. */
4291 static const struct nic_operations qla8012_nic_ops = {
4292 	.get_flash		= ql_get_8012_flash_params,
4293 	.port_initialize	= ql_8012_port_initialize,
/* Chip-specific operations for the 8000 device. */
4296 static const struct nic_operations qla8000_nic_ops = {
4297 	.get_flash		= ql_get_8000_flash_params,
4298 	.port_initialize	= ql_8000_port_initialize,
4301 /* Find the pcie function number for the other NIC
4302  * on this chip. Since both NIC functions share a
4303  * common firmware we have the lowest enabled function
4304  * do any common work. Examples would be resetting
4305  * after a fatal firmware error, or doing a firmware
/* Reads MPI_TEST_FUNC_PORT_CFG, extracts the two NIC function
 * numbers, and stores the one that is not ours in qdev->alt_func.
 */
4308 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4312 	u32 nic_func1, nic_func2;
4314 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4319 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4320 		     MPI_TEST_NIC_FUNC_MASK);
4321 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4322 		     MPI_TEST_NIC_FUNC_MASK);
4324 	if (qdev->func == nic_func1)
4325 		qdev->alt_func = nic_func2;
4326 	else if (qdev->func == nic_func2)
4327 		qdev->alt_func = nic_func1;
/* Discover board topology: read this function's ID from STS, find
 * the alternate PCIe function, and derive the port number (lower
 * function number is port 0).  Then select the per-port semaphore
 * mask, link/init status bits, and mailbox addresses, read the chip
 * revision, and bind the device-specific nic_operations table.
 */
4334 static int ql_get_board_info(struct ql_adapter *qdev)
4339 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4343 	status = ql_get_alt_pcie_func(qdev);
4347 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	/* Port 1 register/mailbox selections. */
4349 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4350 		qdev->port_link_up = STS_PL1;
4351 		qdev->port_init = STS_PI1;
4352 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4353 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	/* Port 0 register/mailbox selections. */
4355 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4356 		qdev->port_link_up = STS_PL0;
4357 		qdev->port_init = STS_PI0;
4358 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4359 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4361 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4362 	qdev->device_id = qdev->pdev->device;
4363 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4364 		qdev->nic_ops = &qla8012_nic_ops;
4365 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4366 		qdev->nic_ops = &qla8000_nic_ops;
/* Tear down everything ql_init_device() acquired: destroy the
 * workqueue, unmap the register and doorbell BARs, free the optional
 * MPI coredump buffer, and release the PCI regions.
 */
4370 static void ql_release_all(struct pci_dev *pdev)
4372 	struct net_device *ndev = pci_get_drvdata(pdev);
4373 	struct ql_adapter *qdev = netdev_priv(ndev);
4375 	if (qdev->workqueue) {
4376 		destroy_workqueue(qdev->workqueue);
4377 		qdev->workqueue = NULL;
4381 		iounmap(qdev->reg_base);
4382 	if (qdev->doorbell_area)
4383 		iounmap(qdev->doorbell_area);
	/* vfree(NULL) is a no-op, so no guard is needed. */
4384 	vfree(qdev->mpi_coredump);
4385 	pci_release_regions(pdev);
/* One-time PCI/device initialization at probe: enable the PCI device,
 * set the PCIe read-request size, claim BAR regions, configure DMA
 * masks (64-bit preferred, 32-bit fallback), map the register (BAR 1)
 * and doorbell (BAR 3) areas, read board info, optionally allocate an
 * MPI coredump buffer, read the flash (EEPROM), set default ring and
 * coalescing parameters, and create the workqueue plus all delayed
 * work items.  Errors unwind through ql_release_all()/disable paths.
 */
4388 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4391 	struct ql_adapter *qdev = netdev_priv(ndev);
4394 	memset((void *)qdev, 0, sizeof(*qdev));
4395 	err = pci_enable_device(pdev);
4397 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4403 	pci_set_drvdata(pdev, ndev);
4405 	/* Set PCIe read request size */
4406 	err = pcie_set_readrq(pdev, 4096);
4408 		dev_err(&pdev->dev, "Set readrq failed.\n");
4412 	err = pci_request_regions(pdev, DRV_NAME);
4414 		dev_err(&pdev->dev, "PCI region request failed.\n");
4418 	pci_set_master(pdev);
	/* Prefer 64-bit DMA; remember it in QL_DMA64 so probe can set
	 * NETIF_F_HIGHDMA.  Fall back to 32-bit masks otherwise.
	 */
4419 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4420 		set_bit(QL_DMA64, &qdev->flags);
4421 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4423 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4425 			err = dma_set_coherent_mask(&pdev->dev,
4430 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4434 	/* Set PCIe reset type for EEH to fundamental. */
4435 	pdev->needs_freset = 1;
4436 	pci_save_state(pdev);
	/* Map the control-register BAR (BAR 1). */
4438 		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4439 	if (!qdev->reg_base) {
4440 		dev_err(&pdev->dev, "Register mapping failed.\n");
	/* Map the doorbell BAR (BAR 3). */
4445 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4446 	qdev->doorbell_area =
4447 		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4448 	if (!qdev->doorbell_area) {
4449 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4454 	err = ql_get_board_info(qdev);
4456 		dev_err(&pdev->dev, "Register access failed.\n");
4460 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4461 	spin_lock_init(&qdev->stats_lock);
	/* Coredump buffer is only allocated when the module parameter
	 * qlge_mpi_coredump requests it (it is large).
	 */
4463 	if (qlge_mpi_coredump) {
4464 		qdev->mpi_coredump =
4465 			vmalloc(sizeof(struct ql_mpi_coredump));
4466 		if (!qdev->mpi_coredump) {
4470 	if (qlge_force_coredump)
4471 		set_bit(QL_FRC_COREDUMP, &qdev->flags);
4473 	/* make sure the EEPROM is good */
4474 	err = qdev->nic_ops->get_flash(qdev);
4476 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4480 	/* Keep local copy of current mac address. */
4481 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4483 	/* Set up the default ring sizes. */
4484 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4485 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4487 	/* Set up the coalescing parameters. */
4488 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4489 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4490 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4491 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4494 	 * Set up the operating parameters.
4496 	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4498 	if (!qdev->workqueue) {
4503 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4504 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4505 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4506 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4507 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4508 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4509 	init_completion(&qdev->ide_completion);
4510 	mutex_init(&qdev->mpi_mutex);
4513 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4514 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4515 			 DRV_NAME, DRV_VERSION);
	/* Error path: release mappings/regions, then disable the device. */
4519 	ql_release_all(pdev);
4521 	pci_disable_device(pdev);
/* Netdev callback table wired into ndev->netdev_ops at probe time. */
4525 static const struct net_device_ops qlge_netdev_ops = {
4526 	.ndo_open		= qlge_open,
4527 	.ndo_stop		= qlge_close,
4528 	.ndo_start_xmit		= qlge_send,
4529 	.ndo_change_mtu		= qlge_change_mtu,
4530 	.ndo_get_stats		= qlge_get_stats,
4531 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4532 	.ndo_set_mac_address	= qlge_set_mac_address,
4533 	.ndo_validate_addr	= eth_validate_addr,
4534 	.ndo_tx_timeout		= qlge_tx_timeout,
4535 	.ndo_set_features	= qlge_set_features,
4536 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4537 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
/* Periodic (5s, deferrable) watchdog timer: reads STS to detect an
 * EEH/offline PCI channel.  If the channel is offline the error is
 * logged (and the timer is not re-armed on that path in the full
 * source); otherwise the timer re-arms itself.
 */
4540 static void ql_timer(struct timer_list *t)
4542 	struct ql_adapter *qdev = from_timer(qdev, t, timer);
4545 	var = ql_read32(qdev, STS);
4546 	if (pci_channel_offline(qdev->pdev)) {
4547 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4551 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
/* PCI probe entry point: allocate a multi-queue ethernet netdev,
 * run ql_init_device(), populate netdev features (checksum offload,
 * VLAN offload, HIGHDMA when 64-bit DMA is usable), install the
 * netdev/ethtool ops, restrict the MTU range, register the netdev,
 * and arm the deferrable EEH watchdog timer.
 */
4554 static int qlge_probe(struct pci_dev *pdev,
4555 		      const struct pci_device_id *pci_entry)
4557 	struct net_device *ndev = NULL;
4558 	struct ql_adapter *qdev = NULL;
4559 	static int cards_found;
4562 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4564 				     netif_get_num_default_rss_queues()));
4568 	err = ql_init_device(pdev, ndev, cards_found);
4574 	qdev = netdev_priv(ndev);
4575 	SET_NETDEV_DEV(ndev, &pdev->dev);
4576 	ndev->hw_features = NETIF_F_SG |
4580 			    NETIF_F_HW_VLAN_CTAG_TX |
4581 			    NETIF_F_HW_VLAN_CTAG_RX |
4582 			    NETIF_F_HW_VLAN_CTAG_FILTER |
4584 	ndev->features = ndev->hw_features;
4585 	ndev->vlan_features = ndev->hw_features;
4586 	/* vlan gets same features (except vlan filter) */
4587 	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4588 				 NETIF_F_HW_VLAN_CTAG_TX |
4589 				 NETIF_F_HW_VLAN_CTAG_RX);
4591 	if (test_bit(QL_DMA64, &qdev->flags))
4592 		ndev->features |= NETIF_F_HIGHDMA;
4595 	 * Set up net_device structure.
4597 	ndev->tx_queue_len = qdev->tx_ring_size;
4598 	ndev->irq = pdev->irq;
4600 	ndev->netdev_ops = &qlge_netdev_ops;
4601 	ndev->ethtool_ops = &qlge_ethtool_ops;
4602 	ndev->watchdog_timeo = 10 * HZ;
4604 	/* MTU range: this driver only supports 1500 or 9000, so this only
4605 	 * filters out values above or below, and we'll rely on
4606 	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4608 	ndev->min_mtu = ETH_DATA_LEN;
4609 	ndev->max_mtu = 9000;
4611 	err = register_netdev(ndev);
4613 		dev_err(&pdev->dev, "net device registration failed.\n");
4614 		ql_release_all(pdev);
4615 		pci_disable_device(pdev);
4619 	/* Start up the timer to trigger EEH if
	/* Deferrable so the 5s poll doesn't wake an idle CPU. */
4622 	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4623 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4625 	ql_display_dev_info(ndev);
4626 	atomic_set(&qdev->lb_count, 0);
/* Loopback-test transmit helper (used by ethtool self-test code):
 * forwards directly to the normal transmit path.
 */
4631 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4633 	return qlge_send(skb, ndev);
/* Loopback-test receive helper: drain up to @budget completions from
 * an inbound ring via the normal inbound-clean routine.
 */
4636 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4638 	return ql_clean_inbound_rx_ring(rx_ring, budget);
/* PCI remove entry point: stop the watchdog timer, cancel all work,
 * unregister the netdev, release BARs/workqueue, and disable the
 * PCI device.
 */
4641 static void qlge_remove(struct pci_dev *pdev)
4643 	struct net_device *ndev = pci_get_drvdata(pdev);
4644 	struct ql_adapter *qdev = netdev_priv(ndev);
4646 	del_timer_sync(&qdev->timer);
4647 	ql_cancel_all_work_sync(qdev);
4648 	unregister_netdev(ndev);
4649 	ql_release_all(pdev);
4650 	pci_disable_device(pdev);
4654 /* Clean up resources without touching hardware. */
/* Used on the EEH (PCI error) paths where register access may hang:
 * stop the queues, cancel work, delete NAPI contexts, clear the UP
 * flag, and free TX/RX resources — all software-side only.
 */
4655 static void ql_eeh_close(struct net_device *ndev)
4658 	struct ql_adapter *qdev = netdev_priv(ndev);
4660 	if (netif_carrier_ok(ndev)) {
4661 		netif_carrier_off(ndev);
4662 		netif_stop_queue(ndev);
4665 	/* Disabling the timer */
4666 	ql_cancel_all_work_sync(qdev);
4668 	for (i = 0; i < qdev->rss_ring_count; i++)
4669 		netif_napi_del(&qdev->rx_ring[i].napi);
4671 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4672 	ql_tx_ring_clean(qdev);
4673 	ql_free_rx_buffers(qdev);
4674 	ql_release_adapter_resources(qdev);
4678  * This callback is called by the PCI subsystem whenever
4679  * a PCI bus error is detected.
/* Returns the recovery action for each channel state:
 *  - io_normal:       recoverable in place;
 *  - io_frozen:       detach, stop the timer, tear down software
 *                     state, disable the device, request a reset;
 *  - io_perm_failure: mark QL_EEH_FATAL and disconnect.
 */
4681 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4682 					       pci_channel_state_t state)
4684 	struct net_device *ndev = pci_get_drvdata(pdev);
4685 	struct ql_adapter *qdev = netdev_priv(ndev);
4688 	case pci_channel_io_normal:
4689 		return PCI_ERS_RESULT_CAN_RECOVER;
4690 	case pci_channel_io_frozen:
4691 		netif_device_detach(ndev);
4692 		del_timer_sync(&qdev->timer);
4693 		if (netif_running(ndev))
4695 		pci_disable_device(pdev);
4696 		return PCI_ERS_RESULT_NEED_RESET;
4697 	case pci_channel_io_perm_failure:
4699 			"%s: pci_channel_io_perm_failure.\n", __func__);
4700 		del_timer_sync(&qdev->timer);
4702 		set_bit(QL_EEH_FATAL, &qdev->flags);
4703 		return PCI_ERS_RESULT_DISCONNECT;
4706 	/* Request a slot reset. */
4707 	return PCI_ERS_RESULT_NEED_RESET;
4711  * This callback is called after the PCI buss has been reset.
4712  * Basically, this tries to restart the card from scratch.
4713  * This is a shortened version of the device probe/discovery code,
4714  * it resembles the first-half of the () routine.
/* Restore PCI state, re-enable and re-master the device, then
 * soft-reset the chip.  Any failure marks QL_EEH_FATAL and
 * disconnects; success reports RECOVERED.
 */
4716 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4718 	struct net_device *ndev = pci_get_drvdata(pdev);
4719 	struct ql_adapter *qdev = netdev_priv(ndev);
4721 	pdev->error_state = pci_channel_io_normal;
4723 	pci_restore_state(pdev);
4724 	if (pci_enable_device(pdev)) {
4725 		netif_err(qdev, ifup, qdev->ndev,
4726 			  "Cannot re-enable PCI device after reset.\n");
4727 		return PCI_ERS_RESULT_DISCONNECT;
4729 	pci_set_master(pdev);
4731 	if (ql_adapter_reset(qdev)) {
4732 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4733 		set_bit(QL_EEH_FATAL, &qdev->flags);
4734 		return PCI_ERS_RESULT_DISCONNECT;
4737 	return PCI_ERS_RESULT_RECOVERED;
/* Final EEH recovery step: if the interface was running, reopen it
 * (logging any failure); otherwise just note it wasn't running.
 * Re-arms the watchdog timer and re-attaches the netdev.
 */
4740 static void qlge_io_resume(struct pci_dev *pdev)
4742 	struct net_device *ndev = pci_get_drvdata(pdev);
4743 	struct ql_adapter *qdev = netdev_priv(ndev);
4746 	if (netif_running(ndev)) {
4747 		err = qlge_open(ndev);
4749 			netif_err(qdev, ifup, qdev->ndev,
4750 				  "Device initialization failed after reset.\n");
4754 		netif_err(qdev, ifup, qdev->ndev,
4755 			  "Device was not running prior to EEH.\n");
4757 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4758 	netif_device_attach(ndev);
/* PCI AER/EEH error-recovery callbacks for this driver. */
4761 static const struct pci_error_handlers qlge_err_handler = {
4762 	.error_detected = qlge_io_error_detected,
4763 	.slot_reset = qlge_io_slot_reset,
4764 	.resume = qlge_io_resume,
/* PM suspend callback: detach the netdev, stop the watchdog timer,
 * and if the interface is running, bring the adapter down (WOL
 * programming elided in this extract).
 */
4767 static int __maybe_unused qlge_suspend(struct device *dev_d)
4769 	struct net_device *ndev = dev_get_drvdata(dev_d);
4770 	struct ql_adapter *qdev = netdev_priv(ndev);
4773 	netif_device_detach(ndev);
4774 	del_timer_sync(&qdev->timer);
4776 	if (netif_running(ndev)) {
4777 		err = ql_adapter_down(qdev);
/* PM resume callback: restore bus mastering, disable wakeup, bring
 * the adapter back up if the interface was running, then re-arm the
 * watchdog timer and re-attach the netdev.
 */
4787 static int __maybe_unused qlge_resume(struct device *dev_d)
4789 	struct net_device *ndev = dev_get_drvdata(dev_d);
4790 	struct ql_adapter *qdev = netdev_priv(ndev);
4793 	pci_set_master(to_pci_dev(dev_d));
4795 	device_wakeup_disable(dev_d);
4797 	if (netif_running(ndev)) {
4798 		err = ql_adapter_up(qdev);
4803 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4804 	netif_device_attach(ndev);
/* PCI shutdown callback: reuse the suspend path to quiesce the NIC. */
4809 static void qlge_shutdown(struct pci_dev *pdev)
4811 	qlge_suspend(&pdev->dev);
/* Standard suspend/resume PM ops built from the handlers above. */
4814 static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
/* PCI driver descriptor; module_pci_driver() generates the module
 * init/exit boilerplate that registers/unregisters it.
 */
4816 static struct pci_driver qlge_driver = {
4818 	.id_table = qlge_pci_tbl,
4819 	.probe = qlge_probe,
4820 	.remove = qlge_remove,
4821 	.driver.pm = &qlge_pm_ops,
4822 	.shutdown = qlge_shutdown,
4823 	.err_handler = &qlge_err_handler
4826 module_pci_driver(qlge_driver);