1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43
44 #include "qlge.h"
45
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53
54 static const u32 default_msg =
55     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER |    */
57     NETIF_MSG_IFDOWN |
58     NETIF_MSG_IFUP |
59     NETIF_MSG_RX_ERR |
60     NETIF_MSG_TX_ERR |
61 /*  NETIF_MSG_TX_QUEUED | */
62 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66 static int debug = -1;  /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70 #define MSIX_IRQ 0
71 #define MSI_IRQ 1
72 #define LEG_IRQ 2
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80                 "Option to enable MPI firmware dump. "
81                 "Default is OFF - Do not allocate memory.");
82
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86                 "Option to allow force of firmware core dump. "
87                 "Default is OFF - Do not allow.");
88
89 static const struct pci_device_id qlge_pci_tbl[] = {
90         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92         /* required last entry */
93         {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
102
103 /* This hardware semaphore provides exclusive access to
104  * resources shared between the NIC driver, MPI firmware,
105  * FCOE firmware and the FC driver.
106  */
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108 {
109         u32 sem_bits = 0;
110
111         switch (sem_mask) {
112         case SEM_XGMAC0_MASK:
113                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114                 break;
115         case SEM_XGMAC1_MASK:
116                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117                 break;
118         case SEM_ICB_MASK:
119                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120                 break;
121         case SEM_MAC_ADDR_MASK:
122                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123                 break;
124         case SEM_FLASH_MASK:
125                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126                 break;
127         case SEM_PROBE_MASK:
128                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129                 break;
130         case SEM_RT_IDX_MASK:
131                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132                 break;
133         case SEM_PROC_REG_MASK:
134                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135                 break;
136         default:
137                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
138                 return -EINVAL;
139         }
140
141         ql_write32(qdev, SEM, sem_bits | sem_mask);
142         return !(ql_read32(qdev, SEM) & sem_bits);
143 }
144
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 {
147         unsigned int wait_count = 30;
148         do {
149                 if (!ql_sem_trylock(qdev, sem_mask))
150                         return 0;
151                 udelay(100);
152         } while (--wait_count);
153         return -ETIMEDOUT;
154 }
155
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 {
158         ql_write32(qdev, SEM, sem_mask);
159         ql_read32(qdev, SEM);   /* flush */
160 }
161
162 /* This function waits for a specific bit to come ready
163  * in a given register.  It is used mostly by the initialization
164  * process, but is also used by kernel thread APIs such as
165  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166  */
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168 {
169         u32 temp;
170         int count;
171
172         for (count = 0; count < UDELAY_COUNT; count++) {
173                 temp = ql_read32(qdev, reg);
174
175                 /* check for errors */
176                 if (temp & err_bit) {
177                         netif_alert(qdev, probe, qdev->ndev,
178                                     "register 0x%.08x access error, value = 0x%.08x!\n",
179                                     reg, temp);
180                         return -EIO;
181                 } else if (temp & bit)
182                         return 0;
183                 udelay(UDELAY_DELAY);
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count;
196         u32 temp;
197
198         for (count = 0; count < UDELAY_COUNT; count++) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205         }
206         return -ETIMEDOUT;
207 }
208
209
210 /* Used to issue init control blocks to hw. Maps control block,
211  * sets address, triggers download, waits for completion.
212  */
213 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
214                  u16 q_id)
215 {
216         u64 map;
217         int status = 0;
218         int direction;
219         u32 mask;
220         u32 value;
221
222         direction =
223             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
224             PCI_DMA_FROMDEVICE;
225
226         map = pci_map_single(qdev->pdev, ptr, size, direction);
227         if (pci_dma_mapping_error(qdev->pdev, map)) {
228                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
229                 return -ENOMEM;
230         }
231
232         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
233         if (status) {
                /* Don't leak the DMA mapping if we can't get the semaphore. */
234                 pci_unmap_single(qdev->pdev, map, size, direction);
                return status;
        }
235
236         status = ql_wait_cfg(qdev, bit);
237         if (status) {
238                 netif_err(qdev, ifup, qdev->ndev,
239                           "Timed out waiting for CFG to come ready.\n");
240                 goto exit;
241         }
242
243         ql_write32(qdev, ICB_L, (u32) map);
244         ql_write32(qdev, ICB_H, (u32) (map >> 32));
245
246         mask = CFG_Q_MASK | (bit << 16);
247         value = bit | (q_id << CFG_Q_SHIFT);
248         ql_write32(qdev, CFG, (mask | value));
249
250         /*
251          * Wait for the bit to clear after signaling hw.
252          */
253         status = ql_wait_cfg(qdev, bit);
254 exit:
255         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
256         pci_unmap_single(qdev->pdev, map, size, direction);
257         return status;
258 }
259
260 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
261 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
262                         u32 *value)
263 {
264         u32 offset = 0;
265         int status;
266
267         switch (type) {
268         case MAC_ADDR_TYPE_MULTI_MAC:
269         case MAC_ADDR_TYPE_CAM_MAC:
270                 {
271                         status =
272                             ql_wait_reg_rdy(qdev,
273                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
274                         if (status)
275                                 goto exit;
276                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
277                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
278                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
279                         status =
280                             ql_wait_reg_rdy(qdev,
281                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
282                         if (status)
283                                 goto exit;
284                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
285                         status =
286                             ql_wait_reg_rdy(qdev,
287                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
288                         if (status)
289                                 goto exit;
290                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
291                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
292                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
293                         status =
294                             ql_wait_reg_rdy(qdev,
295                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
296                         if (status)
297                                 goto exit;
298                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
299                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
300                                 status =
301                                     ql_wait_reg_rdy(qdev,
302                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
303                                 if (status)
304                                         goto exit;
305                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
306                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
307                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
308                                 status =
309                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
310                                                     MAC_ADDR_MR, 0);
311                                 if (status)
312                                         goto exit;
313                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
314                         }
315                         break;
316                 }
317         case MAC_ADDR_TYPE_VLAN:
318         case MAC_ADDR_TYPE_MULTI_FLTR:
319         default:
320                 netif_crit(qdev, ifup, qdev->ndev,
321                            "Address type %d not yet supported.\n", type);
322                 status = -EPERM;
323         }
324 exit:
325         return status;
326 }
327
328 /* Set up a MAC, multicast or VLAN address for the
329  * inbound frame matching.
330  */
331 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
332                                u16 index)
333 {
334         u32 offset = 0;
335         int status = 0;
336
337         switch (type) {
338         case MAC_ADDR_TYPE_MULTI_MAC:
339                 {
340                         u32 upper = (addr[0] << 8) | addr[1];
341                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
342                                         (addr[4] << 8) | (addr[5]);
343
344                         status =
345                                 ql_wait_reg_rdy(qdev,
346                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
347                         if (status)
348                                 goto exit;
349                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
350                                 (index << MAC_ADDR_IDX_SHIFT) |
351                                 type | MAC_ADDR_E);
352                         ql_write32(qdev, MAC_ADDR_DATA, lower);
353                         status =
354                                 ql_wait_reg_rdy(qdev,
355                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
356                         if (status)
357                                 goto exit;
358                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
359                                 (index << MAC_ADDR_IDX_SHIFT) |
360                                 type | MAC_ADDR_E);
361
362                         ql_write32(qdev, MAC_ADDR_DATA, upper);
363                         status =
364                                 ql_wait_reg_rdy(qdev,
365                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
366                         if (status)
367                                 goto exit;
368                         break;
369                 }
370         case MAC_ADDR_TYPE_CAM_MAC:
371                 {
372                         u32 cam_output;
373                         u32 upper = (addr[0] << 8) | addr[1];
374                         u32 lower =
375                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
376                             (addr[5]);
377                         status =
378                             ql_wait_reg_rdy(qdev,
379                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
380                         if (status)
381                                 goto exit;
382                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
383                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
384                                    type);       /* type */
385                         ql_write32(qdev, MAC_ADDR_DATA, lower);
386                         status =
387                             ql_wait_reg_rdy(qdev,
388                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
389                         if (status)
390                                 goto exit;
391                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
392                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
393                                    type);       /* type */
394                         ql_write32(qdev, MAC_ADDR_DATA, upper);
395                         status =
396                             ql_wait_reg_rdy(qdev,
397                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
398                         if (status)
399                                 goto exit;
400                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
401                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
402                                    type);       /* type */
403                         /* This field should also include the queue id
404                          * and possibly the function id.  Right now we hardcode
405                          * the route field to NIC core.
406                          */
407                         cam_output = (CAM_OUT_ROUTE_NIC |
408                                       (qdev->
409                                        func << CAM_OUT_FUNC_SHIFT) |
410                                         (0 << CAM_OUT_CQ_ID_SHIFT));
411                         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
412                                 cam_output |= CAM_OUT_RV;
413                         /* route to NIC core */
414                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
415                         break;
416                 }
417         case MAC_ADDR_TYPE_VLAN:
418                 {
419                         u32 enable_bit = *((u32 *) &addr[0]);
420                         /* For VLAN, the addr actually holds a bit that
421                          * either enables or disables the vlan id we are
422                          * addressing. It's either MAC_ADDR_E on or off.
423                          * That's bit-27 we're talking about.
424                          */
425                         status =
426                             ql_wait_reg_rdy(qdev,
427                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
428                         if (status)
429                                 goto exit;
430                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
431                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
432                                    type |       /* type */
433                                    enable_bit); /* enable/disable */
434                         break;
435                 }
436         case MAC_ADDR_TYPE_MULTI_FLTR:
437         default:
438                 netif_crit(qdev, ifup, qdev->ndev,
439                            "Address type %d not yet supported.\n", type);
440                 status = -EPERM;
441         }
442 exit:
443         return status;
444 }
445
446 /* Set or clear MAC address in hardware. We sometimes
447  * have to clear it to prevent wrong frame routing
448  * especially in a bonding environment.
449  */
450 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
451 {
452         int status;
453         char zero_mac_addr[ETH_ALEN];
454         char *addr;
455
456         if (set) {
457                 addr = &qdev->current_mac_addr[0];
458                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
459                              "Set Mac addr %pM\n", addr);
460         } else {
461                 eth_zero_addr(zero_mac_addr);
462                 addr = &zero_mac_addr[0];
463                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
464                              "Clearing MAC address\n");
465         }
466         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
467         if (status)
468                 return status;
469         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
470                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
471         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
472         if (status)
473                 netif_err(qdev, ifup, qdev->ndev,
474                           "Failed to init mac address.\n");
475         return status;
476 }
477
478 void ql_link_on(struct ql_adapter *qdev)
479 {
480         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
481         netif_carrier_on(qdev->ndev);
482         ql_set_mac_addr(qdev, 1);
483 }
484
485 void ql_link_off(struct ql_adapter *qdev)
486 {
487         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
488         netif_carrier_off(qdev->ndev);
489         ql_set_mac_addr(qdev, 0);
490 }
491
492 /* Get a specific frame routing value from the CAM.
493  * Used for debug and reg dump.
494  */
495 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
496 {
497         int status = 0;
498
499         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
500         if (status)
501                 goto exit;
502
503         ql_write32(qdev, RT_IDX,
504                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
505         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
506         if (status)
507                 goto exit;
508         *value = ql_read32(qdev, RT_DATA);
509 exit:
510         return status;
511 }
512
513 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
514  * to route different frame types to various inbound queues.  We send broadcast/
515  * multicast/error frames to the default queue for slow handling,
516  * and CAM hit/RSS frames to the fast handling queues.
517  */
518 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
519                               int enable)
520 {
521         int status = -EINVAL; /* Return error if no mask match. */
522         u32 value = 0;
523
524         switch (mask) {
525         case RT_IDX_CAM_HIT:
526                 {
527                         value = RT_IDX_DST_CAM_Q |      /* dest */
528                             RT_IDX_TYPE_NICQ |  /* type */
529                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
530                         break;
531                 }
532         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
533                 {
534                         value = RT_IDX_DST_DFLT_Q |     /* dest */
535                             RT_IDX_TYPE_NICQ |  /* type */
536                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
537                         break;
538                 }
539         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
540                 {
541                         value = RT_IDX_DST_DFLT_Q |     /* dest */
542                             RT_IDX_TYPE_NICQ |  /* type */
543                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
544                         break;
545                 }
546         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
547                 {
548                         value = RT_IDX_DST_DFLT_Q | /* dest */
549                                 RT_IDX_TYPE_NICQ | /* type */
550                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
551                                 RT_IDX_IDX_SHIFT); /* index */
552                         break;
553                 }
554         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
555                 {
556                         value = RT_IDX_DST_DFLT_Q | /* dest */
557                                 RT_IDX_TYPE_NICQ | /* type */
558                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
559                                 RT_IDX_IDX_SHIFT); /* index */
560                         break;
561                 }
562         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
563                 {
564                         value = RT_IDX_DST_DFLT_Q |     /* dest */
565                             RT_IDX_TYPE_NICQ |  /* type */
566                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
567                         break;
568                 }
569         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
570                 {
571                         value = RT_IDX_DST_DFLT_Q |     /* dest */
572                             RT_IDX_TYPE_NICQ |  /* type */
573                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
574                         break;
575                 }
576         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
577                 {
578                         value = RT_IDX_DST_DFLT_Q |     /* dest */
579                             RT_IDX_TYPE_NICQ |  /* type */
580                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
581                         break;
582                 }
583         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
584                 {
585                         value = RT_IDX_DST_RSS |        /* dest */
586                             RT_IDX_TYPE_NICQ |  /* type */
587                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588                         break;
589                 }
590         case 0:         /* Clear the E-bit on an entry. */
591                 {
592                         value = RT_IDX_DST_DFLT_Q |     /* dest */
593                             RT_IDX_TYPE_NICQ |  /* type */
594                             (index << RT_IDX_IDX_SHIFT);/* index */
595                         break;
596                 }
597         default:
598                 netif_err(qdev, ifup, qdev->ndev,
599                           "Mask type %d not yet supported.\n", mask);
600                 status = -EPERM;
601                 goto exit;
602         }
603
604         if (value) {
605                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
606                 if (status)
607                         goto exit;
608                 value |= (enable ? RT_IDX_E : 0);
609                 ql_write32(qdev, RT_IDX, value);
610                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
611         }
612 exit:
613         return status;
614 }
615
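/* Enable or disable the chip-wide interrupt.  The upper 16 bits of INTR_EN
 * carry the write mask for the bits being changed: the enable path writes
 * INTR_EN_EI in both halves, the disable path writes only the mask half.
 */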
616 static void ql_enable_interrupts(struct ql_adapter *qdev)
617 {
618         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
619 }
620
621 static void ql_disable_interrupts(struct ql_adapter *qdev)
622 {
623         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
624 }
625
626 static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
627 {
628         struct intr_context *ctx = &qdev->intr_context[intr];
629
630         ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
631 }
632
633 static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635         struct intr_context *ctx = &qdev->intr_context[intr];
636
637         ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
638 }
639
640 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
641 {
642         int i;
643
644         for (i = 0; i < qdev->intr_count; i++)
645                 ql_enable_completion_interrupt(qdev, i);
646 }
647
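/* Validate the flash image read into qdev->flash: check the four-character
 * signature against str and verify that the 16-bit words sum to zero over
 * size words.  Returns 0 when the image is valid.
 */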
648 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
649 {
650         int status, i;
651         u16 csum = 0;
652         __le16 *flash = (__le16 *)&qdev->flash;
653
654         status = strncmp((char *)&qdev->flash, str, 4);
655         if (status) {
656                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
657                 return  status;
658         }
659
660         for (i = 0; i < size; i++)
661                 csum += le16_to_cpu(*flash++);
662
663         if (csum)
664                 netif_err(qdev, ifup, qdev->ndev,
665                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
666
667         return csum;
668 }
669
670 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
671 {
672         int status = 0;
673         /* wait for reg to come ready */
674         status = ql_wait_reg_rdy(qdev,
675                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
676         if (status)
677                 goto exit;
678         /* set up for reg read */
679         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
680         /* wait for reg to come ready */
681         status = ql_wait_reg_rdy(qdev,
682                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
683         if (status)
684                 goto exit;
685         /* This data is stored on flash as an array of
686          * __le32.  Since ql_read32() returns cpu endian
687          * we need to swap it back.
688          */
689         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
690 exit:
691         return status;
692 }
693
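/* Read this function's flash parameter block on 8000-series adapters,
 * validate it and copy the (possibly BOFM-modified) MAC address into the
 * net_device.
 */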
694 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
695 {
696         u32 i, size;
697         int status;
698         __le32 *p = (__le32 *)&qdev->flash;
699         u32 offset;
700         u8 mac_addr[6];
701
702         /* Get flash offset for function and adjust
703          * for dword access.
704          */
705         if (!qdev->port)
706                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
707         else
708                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
709
710         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
711                 return -ETIMEDOUT;
712
713         size = sizeof(struct flash_params_8000) / sizeof(u32);
714         for (i = 0; i < size; i++, p++) {
715                 status = ql_read_flash_word(qdev, i+offset, p);
716                 if (status) {
717                         netif_err(qdev, ifup, qdev->ndev,
718                                   "Error reading flash.\n");
719                         goto exit;
720                 }
721         }
722
723         status = ql_validate_flash(qdev,
724                         sizeof(struct flash_params_8000) / sizeof(u16),
725                         "8000");
726         if (status) {
727                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
728                 status = -EINVAL;
729                 goto exit;
730         }
731
732         /* Extract either manufacturer or BOFM modified
733          * MAC address.
734          */
735         if (qdev->flash.flash_params_8000.data_type1 == 2)
736                 memcpy(mac_addr,
737                         qdev->flash.flash_params_8000.mac_addr1,
738                         qdev->ndev->addr_len);
739         else
740                 memcpy(mac_addr,
741                         qdev->flash.flash_params_8000.mac_addr,
742                         qdev->ndev->addr_len);
743
744         if (!is_valid_ether_addr(mac_addr)) {
745                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
746                 status = -EINVAL;
747                 goto exit;
748         }
749
750         memcpy(qdev->ndev->dev_addr,
751                 mac_addr,
752                 qdev->ndev->addr_len);
753
754 exit:
755         ql_sem_unlock(qdev, SEM_FLASH_MASK);
756         return status;
757 }
758
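/* Read and validate this function's flash parameter block on 8012-series
 * adapters and copy the MAC address into the net_device.
 */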
759 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
760 {
761         int i;
762         int status;
763         __le32 *p = (__le32 *)&qdev->flash;
764         u32 offset = 0;
765         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
766
767         /* Second function's parameters follow the first
768          * function's.
769          */
770         if (qdev->port)
771                 offset = size;
772
773         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
774                 return -ETIMEDOUT;
775
776         for (i = 0; i < size; i++, p++) {
777                 status = ql_read_flash_word(qdev, i+offset, p);
778                 if (status) {
779                         netif_err(qdev, ifup, qdev->ndev,
780                                   "Error reading flash.\n");
781                         goto exit;
782                 }
783
784         }
785
786         status = ql_validate_flash(qdev,
787                         sizeof(struct flash_params_8012) / sizeof(u16),
788                         "8012");
789         if (status) {
790                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
791                 status = -EINVAL;
792                 goto exit;
793         }
794
795         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
796                 status = -EINVAL;
797                 goto exit;
798         }
799
800         memcpy(qdev->ndev->dev_addr,
801                 qdev->flash.flash_params_8012.mac_addr,
802                 qdev->ndev->addr_len);
803
804 exit:
805         ql_sem_unlock(qdev, SEM_FLASH_MASK);
806         return status;
807 }
808
809 /* xgmac registers are located behind the xgmac_addr and xgmac_data
810  * register pair.  Each read/write requires us to wait for the ready
811  * bit before reading/writing the data.
812  */
813 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
814 {
815         int status;
816         /* wait for reg to come ready */
817         status = ql_wait_reg_rdy(qdev,
818                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
819         if (status)
820                 return status;
821         /* write the data to the data reg */
822         ql_write32(qdev, XGMAC_DATA, data);
823         /* trigger the write */
824         ql_write32(qdev, XGMAC_ADDR, reg);
825         return status;
826 }
827
828 /* xgmac registers are located behind the xgmac_addr and xgmac_data
829  * register pair.  Each read/write requires us to wait for the ready
830  * bit before reading/writing the data.
831  */
832 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
833 {
834         int status = 0;
835         /* wait for reg to come ready */
836         status = ql_wait_reg_rdy(qdev,
837                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
838         if (status)
839                 goto exit;
840         /* set up for reg read */
841         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
842         /* wait for reg to come ready */
843         status = ql_wait_reg_rdy(qdev,
844                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
845         if (status)
846                 goto exit;
847         /* get the data */
848         *data = ql_read32(qdev, XGMAC_DATA);
849 exit:
850         return status;
851 }
852
853 /* This is used for reading the 64-bit statistics regs. */
854 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
855 {
856         int status = 0;
857         u32 hi = 0;
858         u32 lo = 0;
859
860         status = ql_read_xgmac_reg(qdev, reg, &lo);
861         if (status)
862                 goto exit;
863
864         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
865         if (status)
866                 goto exit;
867
868         *data = (u64) lo | ((u64) hi << 32);
869
870 exit:
871         return status;
872 }
873
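/* Port bring-up for 8000-series adapters: query the MPI firmware version and
 * state, then let the mpi_port_cfg worker negotiate the TX/RX frame sizes.
 */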
874 static int ql_8000_port_initialize(struct ql_adapter *qdev)
875 {
876         int status;
877         /*
878          * Get MPI firmware version for driver banner
879          * and ethtool info.
880          */
881         status = ql_mb_about_fw(qdev);
882         if (status)
883                 goto exit;
884         status = ql_mb_get_fw_state(qdev);
885         if (status)
886                 goto exit;
887         /* Wake up a worker to get/set the TX/RX frame sizes. */
888         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
889 exit:
890         return status;
891 }
892
893 /* Take the MAC Core out of reset.
894  * Enable statistics counting.
895  * Take the transmitter/receiver out of reset.
896  * This functionality may be done in the MPI firmware at a
897  * later date.
898  */
899 static int ql_8012_port_initialize(struct ql_adapter *qdev)
900 {
901         int status = 0;
902         u32 data;
903
904         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
905                 /* Another function has the semaphore, so
906                  * wait for the port init bit to come ready.
907                  */
908                 netif_info(qdev, link, qdev->ndev,
909                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
910                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
911                 if (status) {
912                         netif_crit(qdev, link, qdev->ndev,
913                                    "Port initialize timed out.\n");
914                 }
915                 return status;
916         }
917
918         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
919         /* Set the core reset. */
920         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
921         if (status)
922                 goto end;
923         data |= GLOBAL_CFG_RESET;
924         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
925         if (status)
926                 goto end;
927
928         /* Clear the core reset and turn on jumbo for receiver. */
929         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
930         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
931         data |= GLOBAL_CFG_TX_STAT_EN;
932         data |= GLOBAL_CFG_RX_STAT_EN;
933         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
934         if (status)
935                 goto end;
936
937         /* Enable transmitter, and clear its reset. */
938         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
939         if (status)
940                 goto end;
941         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
942         data |= TX_CFG_EN;      /* Enable the transmitter. */
943         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
944         if (status)
945                 goto end;
946
947         /* Enable receiver and clear its reset. */
948         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
949         if (status)
950                 goto end;
951         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
952         data |= RX_CFG_EN;      /* Enable the receiver. */
953         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
954         if (status)
955                 goto end;
956
957         /* Turn on jumbo. */
958         status =
959             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
960         if (status)
961                 goto end;
962         status =
963             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
964         if (status)
965                 goto end;
966
967         /* Signal to the world that the port is enabled.        */
968         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
969 end:
970         ql_sem_unlock(qdev, qdev->xg_sem_mask);
971         return status;
972 }
973
974 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
975 {
976         return PAGE_SIZE << qdev->lbq_buf_order;
977 }
978
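/* Return the next buffer descriptor to be consumed from a buffer queue and
 * advance next_to_clean, wrapping around the end of the ring.
 */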
979 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
980 {
981         struct qlge_bq_desc *bq_desc;
982
983         bq_desc = &bq->queue[bq->next_to_clean];
984         bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
985
986         return bq_desc;
987 }
988
989 static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
990                                                struct rx_ring *rx_ring)
991 {
992         struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
993
994         pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
995                                     qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
996
997         if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
998             ql_lbq_block_size(qdev)) {
999                 /* last chunk of the master page */
1000                 pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
1001                                ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
1002         }
1003
1004         return lbq_desc;
1005 }
1006
1007 /* Update an rx ring index. */
1008 static void ql_update_cq(struct rx_ring *rx_ring)
1009 {
1010         rx_ring->cnsmr_idx++;
1011         rx_ring->curr_entry++;
1012         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1013                 rx_ring->cnsmr_idx = 0;
1014                 rx_ring->curr_entry = rx_ring->cq_base;
1015         }
1016 }
1017
1018 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1019 {
1020         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1021 }
1022
1023 static const char * const bq_type_name[] = {
1024         [QLGE_SB] = "sbq",
1025         [QLGE_LB] = "lbq",
1026 };
1027
1028 /* return 0 or negative error */
1029 static int qlge_refill_sb(struct rx_ring *rx_ring,
1030                           struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1031 {
1032         struct ql_adapter *qdev = rx_ring->qdev;
1033         struct sk_buff *skb;
1034
1035         if (sbq_desc->p.skb)
1036                 return 0;
1037
1038         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1039                      "ring %u sbq: getting new skb for index %d.\n",
1040                      rx_ring->cq_id, sbq_desc->index);
1041
1042         skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1043         if (!skb)
1044                 return -ENOMEM;
1045         skb_reserve(skb, QLGE_SB_PAD);
1046
1047         sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
1048                                             SMALL_BUF_MAP_SIZE,
1049                                             PCI_DMA_FROMDEVICE);
1050         if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
1051                 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1052                 dev_kfree_skb_any(skb);
1053                 return -EIO;
1054         }
1055         *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1056
1057         sbq_desc->p.skb = skb;
1058         return 0;
1059 }
1060
1061 /* return 0 or negative error */
1062 static int qlge_refill_lb(struct rx_ring *rx_ring,
1063                           struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1064 {
1065         struct ql_adapter *qdev = rx_ring->qdev;
1066         struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1067
1068         if (!master_chunk->page) {
1069                 struct page *page;
1070                 dma_addr_t dma_addr;
1071
1072                 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1073                 if (unlikely(!page))
1074                         return -ENOMEM;
1075                 dma_addr = pci_map_page(qdev->pdev, page, 0,
1076                                         ql_lbq_block_size(qdev),
1077                                         PCI_DMA_FROMDEVICE);
1078                 if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
1079                         __free_pages(page, qdev->lbq_buf_order);
1080                         netif_err(qdev, drv, qdev->ndev,
1081                                   "PCI mapping failed.\n");
1082                         return -EIO;
1083                 }
1084                 master_chunk->page = page;
1085                 master_chunk->va = page_address(page);
1086                 master_chunk->offset = 0;
1087                 rx_ring->chunk_dma_addr = dma_addr;
1088         }
1089
1090         lbq_desc->p.pg_chunk = *master_chunk;
1091         lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1092         *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1093                                          lbq_desc->p.pg_chunk.offset);
1094
1095         /* Adjust the master page chunk for next
1096          * buffer get.
1097          */
1098         master_chunk->offset += qdev->lbq_buf_size;
1099         if (master_chunk->offset == ql_lbq_block_size(qdev)) {
1100                 master_chunk->page = NULL;
1101         } else {
1102                 master_chunk->va += qdev->lbq_buf_size;
1103                 get_page(master_chunk->page);
1104         }
1105
1106         return 0;
1107 }
1108
1109 /* return 0 or negative error */
1110 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1111 {
1112         struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1113         struct ql_adapter *qdev = rx_ring->qdev;
1114         struct qlge_bq_desc *bq_desc;
1115         int refill_count;
1116         int retval;
1117         int i;
1118
1119         refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1120                                     bq->next_to_use);
1121         if (!refill_count)
1122                 return 0;
1123
1124         i = bq->next_to_use;
1125         bq_desc = &bq->queue[i];
1126         i -= QLGE_BQ_LEN;
1127         do {
1128                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1129                              "ring %u %s: try cleaning idx %d\n",
1130                              rx_ring->cq_id, bq_type_name[bq->type], i);
1131
1132                 if (bq->type == QLGE_SB)
1133                         retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1134                 else
1135                         retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1136                 if (retval < 0) {
1137                         netif_err(qdev, ifup, qdev->ndev,
1138                                   "ring %u %s: Could not get a page chunk, idx %d\n",
1139                                   rx_ring->cq_id, bq_type_name[bq->type], i);
1140                         break;
1141                 }
1142
1143                 bq_desc++;
1144                 i++;
1145                 if (unlikely(!i)) {
1146                         bq_desc = &bq->queue[0];
1147                         i -= QLGE_BQ_LEN;
1148                 }
1149                 refill_count--;
1150         } while (refill_count);
1151         i += QLGE_BQ_LEN;
1152
1153         if (bq->next_to_use != i) {
1154                 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1155                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1156                                      "ring %u %s: updating prod idx = %d.\n",
1157                                      rx_ring->cq_id, bq_type_name[bq->type],
1158                                      i);
1159                         ql_write_db_reg(i, bq->prod_idx_db_reg);
1160                 }
1161                 bq->next_to_use = i;
1162         }
1163
1164         return retval;
1165 }
1166
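/* Try to refill both buffer queues; if either of them stays critically low
 * (see the thresholds below), reschedule the refill from process context.
 */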
1167 static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1168                                     unsigned long delay)
1169 {
1170         bool sbq_fail, lbq_fail;
1171
1172         sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1173         lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1174
1175         /* Minimum number of buffers needed to be able to receive at least one
1176          * frame of any format:
1177          * sbq: 1 for header + 1 for data
1178          * lbq: mtu 9000 / lb size
1179          * Below this, the queue might stall.
1180          */
1181         if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1182             (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1183              DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1184                 /* Allocations can take a long time in certain cases (ex.
1185                  * reclaim). Therefore, use a workqueue for long-running
1186                  * work items.
1187                  */
1188                 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1189                                       &rx_ring->refill_work, delay);
1190 }
1191
1192 static void qlge_slow_refill(struct work_struct *work)
1193 {
1194         struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1195                                                refill_work.work);
1196         struct napi_struct *napi = &rx_ring->napi;
1197
1198         napi_disable(napi);
1199         ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1200         napi_enable(napi);
1201
1202         local_bh_disable();
1203         /* napi_disable() might have prevented incomplete napi work from being
1204          * rescheduled.
1205          */
1206         napi_schedule(napi);
1207         /* trigger softirq processing */
1208         local_bh_enable();
1209 }
1210
1211 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1212  * fails at some stage, or from the interrupt when a tx completes.
1213  */
1214 static void ql_unmap_send(struct ql_adapter *qdev,
1215                           struct tx_ring_desc *tx_ring_desc, int mapped)
1216 {
1217         int i;
1218         for (i = 0; i < mapped; i++) {
1219                 if (i == 0 || (i == 7 && mapped > 7)) {
1220                         /*
1221                          * Unmap the skb->data area, or the
1222                          * external sglist (AKA the Outbound
1223                          * Address List (OAL)).
1224                          * If it's the zeroth element, then it's
1225                          * the skb->data area.  If it's the 7th
1226                          * element and there are more than 6 frags,
1227                          * then it's an OAL.
1228                          */
1229                         if (i == 7) {
1230                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1231                                              qdev->ndev,
1232                                              "unmapping OAL area.\n");
1233                         }
1234                         pci_unmap_single(qdev->pdev,
1235                                          dma_unmap_addr(&tx_ring_desc->map[i],
1236                                                         mapaddr),
1237                                          dma_unmap_len(&tx_ring_desc->map[i],
1238                                                        maplen),
1239                                          PCI_DMA_TODEVICE);
1240                 } else {
1241                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1242                                      "unmapping frag %d.\n", i);
1243                         pci_unmap_page(qdev->pdev,
1244                                        dma_unmap_addr(&tx_ring_desc->map[i],
1245                                                       mapaddr),
1246                                        dma_unmap_len(&tx_ring_desc->map[i],
1247                                                      maplen), PCI_DMA_TODEVICE);
1248                 }
1249         }
1250
1251 }
1252
1253 /* Map the buffers for this transmit.  This will return
1254  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1255  */
1256 static int ql_map_send(struct ql_adapter *qdev,
1257                        struct ob_mac_iocb_req *mac_iocb_ptr,
1258                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1259 {
1260         int len = skb_headlen(skb);
1261         dma_addr_t map;
1262         int frag_idx, err, map_idx = 0;
1263         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1264         int frag_cnt = skb_shinfo(skb)->nr_frags;
1265
1266         if (frag_cnt) {
1267                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1268                              "frag_cnt = %d.\n", frag_cnt);
1269         }
1270         /*
1271          * Map the skb buffer first.
1272          */
1273         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1274
1275         err = pci_dma_mapping_error(qdev->pdev, map);
1276         if (err) {
1277                 netif_err(qdev, tx_queued, qdev->ndev,
1278                           "PCI mapping failed with error: %d\n", err);
1279
1280                 return NETDEV_TX_BUSY;
1281         }
1282
1283         tbd->len = cpu_to_le32(len);
1284         tbd->addr = cpu_to_le64(map);
1285         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1286         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1287         map_idx++;
1288
1289         /*
1290          * This loop fills the remainder of the 8 address descriptors
1291          * in the IOCB.  If there are more than 7 fragments, then the
1292          * eighth address desc will point to an external list (OAL).
1293          * When this happens, the remainder of the frags will be stored
1294          * in this list.
1295          */
1296         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1297                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1298                 tbd++;
1299                 if (frag_idx == 6 && frag_cnt > 7) {
1300                         /* Let's tack on an sglist.
1301                          * Our control block will now
1302                          * look like this:
1303                          * iocb->seg[0] = skb->data
1304                          * iocb->seg[1] = frag[0]
1305                          * iocb->seg[2] = frag[1]
1306                          * iocb->seg[3] = frag[2]
1307                          * iocb->seg[4] = frag[3]
1308                          * iocb->seg[5] = frag[4]
1309                          * iocb->seg[6] = frag[5]
1310                          * iocb->seg[7] = ptr to OAL (external sglist)
1311                          * oal->seg[0] = frag[6]
1312                          * oal->seg[1] = frag[7]
1313                          * oal->seg[2] = frag[8]
1314                          * oal->seg[3] = frag[9]
1315                          * oal->seg[4] = frag[10]
1316                          *      etc...
1317                          */
1318                         /* Tack on the OAL in the eighth segment of IOCB. */
1319                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1320                                              sizeof(struct oal),
1321                                              PCI_DMA_TODEVICE);
1322                         err = pci_dma_mapping_error(qdev->pdev, map);
1323                         if (err) {
1324                                 netif_err(qdev, tx_queued, qdev->ndev,
1325                                           "PCI mapping outbound address list with error: %d\n",
1326                                           err);
1327                                 goto map_error;
1328                         }
1329
1330                         tbd->addr = cpu_to_le64(map);
1331                         /*
1332                          * The length is the number of fragments
1333                          * that remain to be mapped times the length
1334                          * of our sglist (OAL).
1335                          */
1336                         tbd->len =
1337                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1338                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1339                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1340                                            map);
1341                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1342                                           sizeof(struct oal));
1343                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1344                         map_idx++;
1345                 }
1346
1347                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1348                                        DMA_TO_DEVICE);
1349
1350                 err = dma_mapping_error(&qdev->pdev->dev, map);
1351                 if (err) {
1352                         netif_err(qdev, tx_queued, qdev->ndev,
1353                                   "PCI mapping frags failed with error: %d.\n",
1354                                   err);
1355                         goto map_error;
1356                 }
1357
1358                 tbd->addr = cpu_to_le64(map);
1359                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1360                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1361                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1362                                   skb_frag_size(frag));
1363
1364         }
1365         /* Save the number of segments we've mapped. */
1366         tx_ring_desc->map_cnt = map_idx;
1367         /* Terminate the last segment. */
1368         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1369         return NETDEV_TX_OK;
1370
1371 map_error:
1372         /*
1373          * If the first frag mapping failed, then map_idx will be zero.
1374          * This causes the unmap of the skb->data area.  Otherwise
1375          * we pass in the number of frags that mapped successfully
1376          * so they can be unmapped.
1377          */
1378         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1379         return NETDEV_TX_BUSY;
1380 }
1381
1382 /* Categorizing receive firmware frame errors */
1383 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1384                                  struct rx_ring *rx_ring)
1385 {
1386         struct nic_stats *stats = &qdev->nic_stats;
1387
1388         stats->rx_err_count++;
1389         rx_ring->rx_errors++;
1390
1391         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1392         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1393                 stats->rx_code_err++;
1394                 break;
1395         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1396                 stats->rx_oversize_err++;
1397                 break;
1398         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1399                 stats->rx_undersize_err++;
1400                 break;
1401         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1402                 stats->rx_preamble_err++;
1403                 break;
1404         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1405                 stats->rx_frame_len_err++;
1406                 break;
1407         case IB_MAC_IOCB_RSP_ERR_CRC:
1408                 stats->rx_crc_err++;
                     break;
1409         default:
1410                 break;
1411         }
1412 }
1413
1414 /**
1415  * ql_update_mac_hdr_len - helper routine to update the mac header length
1416  * based on vlan tags if present
1417  */
1418 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1419                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1420                                   void *page, size_t *len)
1421 {
1422         u16 *tags;
1423
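             /* Hardware VLAN stripping removes the tag from the packet
              * data, so the MAC header length needs no adjustment here.
              */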
1424         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1425                 return;
1426         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1427                 tags = (u16 *)page;
1428                 /* Look for stacked vlan tags in ethertype field */
1429                 if (tags[6] == ETH_P_8021Q &&
1430                     tags[8] == ETH_P_8021Q)
1431                         *len += 2 * VLAN_HLEN;
1432                 else
1433                         *len += VLAN_HLEN;
1434         }
1435 }
1436
1437 /* Process an inbound completion from an rx ring. */
1438 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1439                                         struct rx_ring *rx_ring,
1440                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1441                                         u32 length,
1442                                         u16 vlan_id)
1443 {
1444         struct sk_buff *skb;
1445         struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1446         struct napi_struct *napi = &rx_ring->napi;
1447
1448         /* Frame error, so drop the packet. */
1449         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1450                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1451                 put_page(lbq_desc->p.pg_chunk.page);
1452                 return;
1453         }
1454         napi->dev = qdev->ndev;
1455
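             /* Get the per-NAPI GRO skb and attach this page chunk to it
              * as a fragment; napi_gro_frags() below feeds it to GRO.
              */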
1456         skb = napi_get_frags(napi);
1457         if (!skb) {
1458                 netif_err(qdev, drv, qdev->ndev,
1459                           "Couldn't get an skb, exiting.\n");
1460                 rx_ring->rx_dropped++;
1461                 put_page(lbq_desc->p.pg_chunk.page);
1462                 return;
1463         }
1464         prefetch(lbq_desc->p.pg_chunk.va);
1465         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1466                              lbq_desc->p.pg_chunk.page,
1467                              lbq_desc->p.pg_chunk.offset,
1468                              length);
1469
1470         skb->len += length;
1471         skb->data_len += length;
1472         skb->truesize += length;
1473         skb_shinfo(skb)->nr_frags++;
1474
1475         rx_ring->rx_packets++;
1476         rx_ring->rx_bytes += length;
1477         skb->ip_summed = CHECKSUM_UNNECESSARY;
1478         skb_record_rx_queue(skb, rx_ring->cq_id);
1479         if (vlan_id != 0xffff)
1480                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1481         napi_gro_frags(napi);
1482 }
1483
1484 /* Process an inbound completion from an rx ring. */
1485 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1486                                         struct rx_ring *rx_ring,
1487                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1488                                         u32 length,
1489                                         u16 vlan_id)
1490 {
1491         struct net_device *ndev = qdev->ndev;
1492         struct sk_buff *skb = NULL;
1493         void *addr;
1494         struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1495         struct napi_struct *napi = &rx_ring->napi;
1496         size_t hlen = ETH_HLEN;
1497
1498         skb = netdev_alloc_skb(ndev, length);
1499         if (!skb) {
1500                 rx_ring->rx_dropped++;
1501                 put_page(lbq_desc->p.pg_chunk.page);
1502                 return;
1503         }
1504
1505         addr = lbq_desc->p.pg_chunk.va;
1506         prefetch(addr);
1507
1508         /* Frame error, so drop the packet. */
1509         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1510                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1511                 goto err_out;
1512         }
1513
1514         /* Update the MAC header length */
1515         ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1516
1517         /* The max framesize filter on this chip is set higher than
1518          * MTU since FCoE uses 2k frames.
1519          */
1520         if (length > ndev->mtu + hlen) {
1521                 netif_err(qdev, drv, qdev->ndev,
1522                           "Frame too long, dropping.\n");
1523                 rx_ring->rx_dropped++;
1524                 goto err_out;
1525         }
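             /* Copy just the MAC header into the skb's linear area and
              * chain the rest of the page chunk on as a page fragment.
              */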
1526         skb_put_data(skb, addr, hlen);
1527         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1528                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1529                      length);
1530         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1531                                 lbq_desc->p.pg_chunk.offset + hlen,
1532                                 length - hlen);
1533         skb->len += length - hlen;
1534         skb->data_len += length - hlen;
1535         skb->truesize += length - hlen;
1536
1537         rx_ring->rx_packets++;
1538         rx_ring->rx_bytes += skb->len;
1539         skb->protocol = eth_type_trans(skb, ndev);
1540         skb_checksum_none_assert(skb);
1541
1542         if ((ndev->features & NETIF_F_RXCSUM) &&
1543                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1544                 /* TCP frame. */
1545                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1546                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1547                                      "TCP checksum done!\n");
1548                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1549                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1550                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1551                         /* Unfragmented ipv4 UDP frame. */
1552                         struct iphdr *iph =
1553                                 (struct iphdr *)((u8 *)addr + hlen);
1554                         if (!(iph->frag_off &
1555                                 htons(IP_MF|IP_OFFSET))) {
1556                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1557                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1558                                              qdev->ndev,
1559                                              "UDP checksum done!\n");
1560                         }
1561                 }
1562         }
1563
1564         skb_record_rx_queue(skb, rx_ring->cq_id);
1565         if (vlan_id != 0xffff)
1566                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1567         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1568                 napi_gro_receive(napi, skb);
1569         else
1570                 netif_receive_skb(skb);
1571         return;
1572 err_out:
1573         dev_kfree_skb_any(skb);
1574         put_page(lbq_desc->p.pg_chunk.page);
1575 }
1576
1577 /* Process an inbound completion from an rx ring. */
1578 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1579                                         struct rx_ring *rx_ring,
1580                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1581                                         u32 length,
1582                                         u16 vlan_id)
1583 {
1584         struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1585         struct net_device *ndev = qdev->ndev;
1586         struct sk_buff *skb, *new_skb;
1587
1588         skb = sbq_desc->p.skb;
1589         /* Allocate new_skb and copy */
1590         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1591         if (!new_skb) {
1592                 rx_ring->rx_dropped++;
1593                 return;
1594         }
1595         skb_reserve(new_skb, NET_IP_ALIGN);
1596
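             /* Sync the small buffer for the CPU and copy the received
              * frame into the newly allocated skb.
              */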
1597         pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
1598                                     SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1599
1600         skb_put_data(new_skb, skb->data, length);
1601
1602         skb = new_skb;
1603
1604         /* Frame error, so drop the packet. */
1605         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1606                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1607                 dev_kfree_skb_any(skb);
1608                 return;
1609         }
1610
1611         /* loopback self test for ethtool */
1612         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1613                 ql_check_lb_frame(qdev, skb);
1614                 dev_kfree_skb_any(skb);
1615                 return;
1616         }
1617
1618         /* The max framesize filter on this chip is set higher than
1619          * MTU since FCoE uses 2k frames.
1620          */
1621         if (skb->len > ndev->mtu + ETH_HLEN) {
1622                 dev_kfree_skb_any(skb);
1623                 rx_ring->rx_dropped++;
1624                 return;
1625         }
1626
1627         prefetch(skb->data);
1628         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1629                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1630                              "%s Multicast.\n",
1631                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1632                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1633                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1634                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1635                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1636                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1637         }
1638         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1639                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1640                              "Promiscuous Packet.\n");
1641
1642         rx_ring->rx_packets++;
1643         rx_ring->rx_bytes += skb->len;
1644         skb->protocol = eth_type_trans(skb, ndev);
1645         skb_checksum_none_assert(skb);
1646
1647         /* If rx checksum is on, and there are no
1648          * csum or frame errors.
1649          */
1650         if ((ndev->features & NETIF_F_RXCSUM) &&
1651                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1652                 /* TCP frame. */
1653                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1654                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1655                                      "TCP checksum done!\n");
1656                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1657                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1658                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1659                         /* Unfragmented ipv4 UDP frame. */
1660                         struct iphdr *iph = (struct iphdr *) skb->data;
1661                         if (!(iph->frag_off &
1662                                 htons(IP_MF|IP_OFFSET))) {
1663                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1664                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1665                                              qdev->ndev,
1666                                              "UDP checksum done!\n");
1667                         }
1668                 }
1669         }
1670
1671         skb_record_rx_queue(skb, rx_ring->cq_id);
1672         if (vlan_id != 0xffff)
1673                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1674         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1675                 napi_gro_receive(&rx_ring->napi, skb);
1676         else
1677                 netif_receive_skb(skb);
1678 }
1679
1680 static void ql_realign_skb(struct sk_buff *skb, int len)
1681 {
1682         void *temp_addr = skb->data;
1683
1684         /* Undo the skb_reserve(skb,32) we did before
1685          * giving to hardware, and realign data on
1686          * a 2-byte boundary.
1687          */
1688         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1689         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1690         memmove(skb->data, temp_addr, len);
1691 }
1692
1693 /*
1694  * This function builds an skb for the given inbound
1695  * completion.  It will be rewritten for readability in the near
1696  * future, but for now it works well.
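      *
      * The completion flags describe where the chip placed the frame: the
      * header may be split into a small buffer, and the data may sit in a
      * small buffer, in a single large buffer, or in a chain of large
      * buffers described by an sg list held in a small buffer.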
1697  */
1698 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1699                                        struct rx_ring *rx_ring,
1700                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1701 {
1702         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1703         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1704         struct qlge_bq_desc *lbq_desc, *sbq_desc;
1705         struct sk_buff *skb = NULL;
1706         size_t hlen = ETH_HLEN;
1707
1708         /*
1709          * Handle the header buffer if present.
1710          */
1711         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1712             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1713                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1714                              "Header of %d bytes in small buffer.\n", hdr_len);
1715                 /*
1716                  * Headers fit nicely into a small buffer.
1717                  */
1718                 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1719                 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1720                                  SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1721                 skb = sbq_desc->p.skb;
1722                 ql_realign_skb(skb, hdr_len);
1723                 skb_put(skb, hdr_len);
1724                 sbq_desc->p.skb = NULL;
1725         }
1726
1727         /*
1728          * Handle the data buffer(s).
1729          */
1730         if (unlikely(!length)) {        /* Is there data too? */
1731                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1732                              "No Data buffer in this packet.\n");
1733                 return skb;
1734         }
1735
1736         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1737                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1738                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1739                                      "Headers in small, data of %d bytes in small, combine them.\n",
1740                                      length);
1741                         /*
1742                          * Data is less than small buffer size so it's
1743                          * stuffed in a small buffer.
1744                          * For this case we append the data
1745                          * from the "data" small buffer to the "header" small
1746                          * buffer.
1747                          */
1748                         sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1749                         pci_dma_sync_single_for_cpu(qdev->pdev,
1750                                                     sbq_desc->dma_addr,
1751                                                     SMALL_BUF_MAP_SIZE,
1752                                                     PCI_DMA_FROMDEVICE);
1753                         skb_put_data(skb, sbq_desc->p.skb->data, length);
1754                 } else {
1755                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1756                                      "%d bytes in a single small buffer.\n",
1757                                      length);
1758                         sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1759                         skb = sbq_desc->p.skb;
1760                         ql_realign_skb(skb, length);
1761                         skb_put(skb, length);
1762                         pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1763                                          SMALL_BUF_MAP_SIZE,
1764                                          PCI_DMA_FROMDEVICE);
1765                         sbq_desc->p.skb = NULL;
1766                 }
1767         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1768                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1769                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1770                                      "Header in small, %d bytes in large. Chain large to small!\n",
1771                                      length);
1772                         /*
1773                          * The data is in a single large buffer.  We
1774                          * chain it to the header buffer's skb and let
1775                          * it rip.
1776                          */
1777                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1778                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1779                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1780                                      lbq_desc->p.pg_chunk.offset, length);
1781                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1782                                                 lbq_desc->p.pg_chunk.offset,
1783                                                 length);
1784                         skb->len += length;
1785                         skb->data_len += length;
1786                         skb->truesize += length;
1787                 } else {
1788                         /*
1789                          * The headers and data are in a single large buffer. We
1790                          * copy it to a new skb and let it go. This can happen with
1791                          * jumbo mtu on a non-TCP/UDP frame.
1792                          */
1793                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1794                         skb = netdev_alloc_skb(qdev->ndev, length);
1795                         if (!skb) {
1796                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1797                                              "No skb available, drop the packet.\n");
1798                                 return NULL;
1799                         }
1800                         pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
1801                                        qdev->lbq_buf_size,
1802                                        PCI_DMA_FROMDEVICE);
1803                         skb_reserve(skb, NET_IP_ALIGN);
1804                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1805                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1806                                      length);
1807                         skb_fill_page_desc(skb, 0,
1808                                                 lbq_desc->p.pg_chunk.page,
1809                                                 lbq_desc->p.pg_chunk.offset,
1810                                                 length);
1811                         skb->len += length;
1812                         skb->data_len += length;
1813                         skb->truesize += length;
1814                         ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1815                                               lbq_desc->p.pg_chunk.va,
1816                                               &hlen);
1817                         __pskb_pull_tail(skb, hlen);
1818                 }
1819         } else {
1820                 /*
1821                  * The data is in a chain of large buffers
1822                  * pointed to by a small buffer.  We loop
1823                  * through and chain them to our small header
1824                  * buffer's skb.
1825                  * frags:  There are at most 18 frags and our small
1826                  *         buffer will hold 32 of them. The thing is,
1827                  *         we'll use at most 3 for our 9000 byte jumbo
1828                  *         frames.  If the MTU goes up we could
1829                  *         eventually be in trouble.
1830                  */
1831                 int size, i = 0;
1832                 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1833                 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1834                                  SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1835                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1836                         /*
1837                          * This is a non-TCP/UDP IP frame, so
1838                          * the headers aren't split into a small
1839                          * buffer.  We have to use the small buffer
1840                          * that contains our sg list as our skb to
1841                          * send up the stack. Copy the sg list here to
1842                          * a local buffer and use it to find the
1843                          * pages to chain.
1844                          */
1845                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846                                      "%d bytes of headers & data in chain of large.\n",
1847                                      length);
1848                         skb = sbq_desc->p.skb;
1849                         sbq_desc->p.skb = NULL;
1850                         skb_reserve(skb, NET_IP_ALIGN);
1851                 }
1852                 do {
1853                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854                         size = min(length, qdev->lbq_buf_size);
1855
1856                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857                                      "Adding page %d to skb for %d bytes.\n",
1858                                      i, size);
1859                         skb_fill_page_desc(skb, i,
1860                                                 lbq_desc->p.pg_chunk.page,
1861                                                 lbq_desc->p.pg_chunk.offset,
1862                                                 size);
1863                         skb->len += size;
1864                         skb->data_len += size;
1865                         skb->truesize += size;
1866                         length -= size;
1867                         i++;
1868                 } while (length > 0);
1869                 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1870                                       &hlen);
1871                 __pskb_pull_tail(skb, hlen);
1872         }
1873         return skb;
1874 }
1875
1876 /* Process an inbound completion from an rx ring. */
1877 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1878                                    struct rx_ring *rx_ring,
1879                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1880                                    u16 vlan_id)
1881 {
1882         struct net_device *ndev = qdev->ndev;
1883         struct sk_buff *skb = NULL;
1884
1885         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1886
1887         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1888         if (unlikely(!skb)) {
1889                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1890                              "No skb available, drop packet.\n");
1891                 rx_ring->rx_dropped++;
1892                 return;
1893         }
1894
1895         /* Frame error, so drop the packet. */
1896         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1897                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1898                 dev_kfree_skb_any(skb);
1899                 return;
1900         }
1901
1902         /* The max framesize filter on this chip is set higher than
1903          * MTU since FCoE uses 2k frames.
1904          */
1905         if (skb->len > ndev->mtu + ETH_HLEN) {
1906                 dev_kfree_skb_any(skb);
1907                 rx_ring->rx_dropped++;
1908                 return;
1909         }
1910
1911         /* loopback self test for ethtool */
1912         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1913                 ql_check_lb_frame(qdev, skb);
1914                 dev_kfree_skb_any(skb);
1915                 return;
1916         }
1917
1918         prefetch(skb->data);
1919         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1920                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1921                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1922                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1923                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1924                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1925                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1926                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1927                 rx_ring->rx_multicast++;
1928         }
1929         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1930                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1931                              "Promiscuous Packet.\n");
1932         }
1933
1934         skb->protocol = eth_type_trans(skb, ndev);
1935         skb_checksum_none_assert(skb);
1936
1937         /* If rx checksum is on, and there are no
1938          * csum or frame errors.
1939          */
1940         if ((ndev->features & NETIF_F_RXCSUM) &&
1941                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1942                 /* TCP frame. */
1943                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1944                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1945                                      "TCP checksum done!\n");
1946                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1947                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1948                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1949                         /* Unfragmented ipv4 UDP frame. */
1950                         struct iphdr *iph = (struct iphdr *) skb->data;
1951                         if (!(iph->frag_off &
1952                                 htons(IP_MF|IP_OFFSET))) {
1953                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1954                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1955                                              "UDP checksum done!\n");
1956                         }
1957                 }
1958         }
1959
1960         rx_ring->rx_packets++;
1961         rx_ring->rx_bytes += skb->len;
1962         skb_record_rx_queue(skb, rx_ring->cq_id);
1963         if (vlan_id != 0xffff)
1964                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1965         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1966                 napi_gro_receive(&rx_ring->napi, skb);
1967         else
1968                 netif_receive_skb(skb);
1969 }
1970
1971 /* Process an inbound completion from an rx ring. */
1972 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1973                                         struct rx_ring *rx_ring,
1974                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
1975 {
1976         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1977         u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1978                         (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1979                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1980                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1981
1982         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1983
1984         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1985                 /* The data and headers are split into
1986                  * separate buffers.
1987                  */
1988                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1989                                                 vlan_id);
1990         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1991                 /* The data fit in a single small buffer.
1992                  * Allocate a new skb, copy the data and
1993                  * return the buffer to the free pool.
1994                  */
1995                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
1996                                                 length, vlan_id);
1997         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1998                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1999                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2000                 /* TCP packet in a page chunk that's been checksummed.
2001                  * Tack it on to our GRO skb and let it go.
2002                  */
2003                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2004                                                 length, vlan_id);
2005         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2006                 /* Non-TCP packet in a page chunk. Allocate an
2007                  * skb, tack it on frags, and send it up.
2008                  */
2009                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2010                                                 length, vlan_id);
2011         } else {
2012                 /* Non-TCP/UDP large frames that span multiple buffers
2013                  * can be processed correctly by the split frame logic.
2014                  */
2015                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2016                                                 vlan_id);
2017         }
2018
2019         return (unsigned long)length;
2020 }
2021
2022 /* Process an outbound completion from an rx ring. */
2023 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2024                                    struct ob_mac_iocb_rsp *mac_rsp)
2025 {
2026         struct tx_ring *tx_ring;
2027         struct tx_ring_desc *tx_ring_desc;
2028
2029         QL_DUMP_OB_MAC_RSP(mac_rsp);
2030         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2031         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2032         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2033         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2034         tx_ring->tx_packets++;
2035         dev_kfree_skb(tx_ring_desc->skb);
2036         tx_ring_desc->skb = NULL;
2037
2038         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2039                                         OB_MAC_IOCB_RSP_S |
2040                                         OB_MAC_IOCB_RSP_L |
2041                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2042                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2043                         netif_warn(qdev, tx_done, qdev->ndev,
2044                                    "Total descriptor length did not match transfer length.\n");
2045                 }
2046                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2047                         netif_warn(qdev, tx_done, qdev->ndev,
2048                                    "Frame too short to be valid, not sent.\n");
2049                 }
2050                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2051                         netif_warn(qdev, tx_done, qdev->ndev,
2052                                    "Frame too long, but sent anyway.\n");
2053                 }
2054                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2055                         netif_warn(qdev, tx_done, qdev->ndev,
2056                                    "PCI backplane error. Frame not sent.\n");
2057                 }
2058         }
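             /* Return the reclaimed descriptor to the ring's free count. */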
2059         atomic_inc(&tx_ring->tx_count);
2060 }
2061
2062 /* Fire up a handler to reset the MPI processor. */
2063 void ql_queue_fw_error(struct ql_adapter *qdev)
2064 {
2065         ql_link_off(qdev);
2066         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2067 }
2068
2069 void ql_queue_asic_error(struct ql_adapter *qdev)
2070 {
2071         ql_link_off(qdev);
2072         ql_disable_interrupts(qdev);
2073         /* Clear adapter up bit to signal the recovery
2074          * process that it shouldn't kill the reset worker
2075          * thread
2076          */
2077         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2078         /* Set asic recovery bit to indicate reset process that we are
2079          * in fatal error recovery process rather than normal close
2080          */
2081         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2082         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2083 }
2084
2085 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2086                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2087 {
2088         switch (ib_ae_rsp->event) {
2089         case MGMT_ERR_EVENT:
2090                 netif_err(qdev, rx_err, qdev->ndev,
2091                           "Management Processor Fatal Error.\n");
2092                 ql_queue_fw_error(qdev);
2093                 return;
2094
2095         case CAM_LOOKUP_ERR_EVENT:
2096                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2097                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2098                 ql_queue_asic_error(qdev);
2099                 return;
2100
2101         case SOFT_ECC_ERROR_EVENT:
2102                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2103                 ql_queue_asic_error(qdev);
2104                 break;
2105
2106         case PCI_ERR_ANON_BUF_RD:
2107                 netdev_err(qdev->ndev,
2108                            "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2109                            ib_ae_rsp->q_id);
2110                 ql_queue_asic_error(qdev);
2111                 break;
2112
2113         default:
2114                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2115                           ib_ae_rsp->event);
2116                 ql_queue_asic_error(qdev);
2117                 break;
2118         }
2119 }
2120
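     /* Service TX completions from an outbound completion queue and wake
      * the matching TX subqueue once at least a quarter of it is free.
      */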
2121 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2122 {
2123         struct ql_adapter *qdev = rx_ring->qdev;
2124         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2125         struct ob_mac_iocb_rsp *net_rsp = NULL;
2126         int count = 0;
2127
2128         struct tx_ring *tx_ring;
2129         /* While there are entries in the completion queue. */
2130         while (prod != rx_ring->cnsmr_idx) {
2131
2132                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2133                              "cq_id = %d, prod = %d, cnsmr = %d\n",
2134                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2135
2136                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2137                 rmb();
2138                 switch (net_rsp->opcode) {
2139
2140                 case OPCODE_OB_MAC_TSO_IOCB:
2141                 case OPCODE_OB_MAC_IOCB:
2142                         ql_process_mac_tx_intr(qdev, net_rsp);
2143                         break;
2144                 default:
2145                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2146                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2147                                      net_rsp->opcode);
2148                 }
2149                 count++;
2150                 ql_update_cq(rx_ring);
2151                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2152         }
2153         if (!net_rsp)
2154                 return 0;
2155         ql_write_cq_idx(rx_ring);
2156         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2157         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2158                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2159                         /*
2160                          * The queue got stopped because the tx_ring was full.
2161                          * Wake it up, because it's now at least 25% empty.
2162                          */
2163                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2164         }
2165
2166         return count;
2167 }
2168
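     /* Process up to budget inbound completions (received frames and
      * asynchronous events), replenish the buffer queues and update the
      * consumer index register.
      */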
2169 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2170 {
2171         struct ql_adapter *qdev = rx_ring->qdev;
2172         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2173         struct ql_net_rsp_iocb *net_rsp;
2174         int count = 0;
2175
2176         /* While there are entries in the completion queue. */
2177         while (prod != rx_ring->cnsmr_idx) {
2178
2179                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2180                              "cq_id = %d, prod = %d, cnsmr = %d\n",
2181                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2182
2183                 net_rsp = rx_ring->curr_entry;
2184                 rmb();
2185                 switch (net_rsp->opcode) {
2186                 case OPCODE_IB_MAC_IOCB:
2187                         ql_process_mac_rx_intr(qdev, rx_ring,
2188                                                (struct ib_mac_iocb_rsp *)
2189                                                net_rsp);
2190                         break;
2191
2192                 case OPCODE_IB_AE_IOCB:
2193                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2194                                                 net_rsp);
2195                         break;
2196                 default:
2197                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2198                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2199                                      net_rsp->opcode);
2200                         break;
2201                 }
2202                 count++;
2203                 ql_update_cq(rx_ring);
2204                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2205                 if (count == budget)
2206                         break;
2207         }
2208         ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2209         ql_write_cq_idx(rx_ring);
2210         return count;
2211 }
2212
2213 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2214 {
2215         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2216         struct ql_adapter *qdev = rx_ring->qdev;
2217         struct rx_ring *trx_ring;
2218         int i, work_done = 0;
2219         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2220
2221         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2222                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2223
2224         /* Service the TX rings first.  They start
2225          * right after the RSS rings. */
2226         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2227                 trx_ring = &qdev->rx_ring[i];
2228                 /* If this TX completion ring belongs to this vector and
2229                  * it's not empty then service it.
2230                  */
2231                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2232                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2233                                         trx_ring->cnsmr_idx)) {
2234                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2235                                      "%s: Servicing TX completion ring %d.\n",
2236                                      __func__, trx_ring->cq_id);
2237                         ql_clean_outbound_rx_ring(trx_ring);
2238                 }
2239         }
2240
2241         /*
2242          * Now service the RSS ring if it's active.
2243          */
2244         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2245                                         rx_ring->cnsmr_idx) {
2246                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2247                              "%s: Servicing RX completion ring %d.\n",
2248                              __func__, rx_ring->cq_id);
2249                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2250         }
2251
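             /* Budget not exhausted: finish NAPI polling and re-enable the
              * completion interrupt for this vector.
              */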
2252         if (work_done < budget) {
2253                 napi_complete_done(napi, work_done);
2254                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2255         }
2256         return work_done;
2257 }
2258
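     /* Toggle hardware VLAN matching in the NIC receive configuration
      * register according to the CTAG RX feature flag.
      */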
2259 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2260 {
2261         struct ql_adapter *qdev = netdev_priv(ndev);
2262
2263         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2264                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2265                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2266         } else {
2267                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2268         }
2269 }
2270
2271 /**
2272  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2273  * based on the features to enable/disable hardware vlan accel
2274  */
2275 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2276                                         netdev_features_t features)
2277 {
2278         struct ql_adapter *qdev = netdev_priv(ndev);
2279         int status = 0;
2280         bool need_restart = netif_running(ndev);
2281
2282         if (need_restart) {
2283                 status = ql_adapter_down(qdev);
2284                 if (status) {
2285                         netif_err(qdev, link, qdev->ndev,
2286                                   "Failed to bring down the adapter\n");
2287                         return status;
2288                 }
2289         }
2290
2291         /* Update the features with the recent changes */
2292         ndev->features = features;
2293
2294         if (need_restart) {
2295                 status = ql_adapter_up(qdev);
2296                 if (status) {
2297                         netif_err(qdev, link, qdev->ndev,
2298                                   "Failed to bring up the adapter\n");
2299                         return status;
2300                 }
2301         }
2302
2303         return status;
2304 }
2305
2306 static int qlge_set_features(struct net_device *ndev,
2307         netdev_features_t features)
2308 {
2309         netdev_features_t changed = ndev->features ^ features;
2310         int err;
2311
2312         if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2313                 /* Update the behavior of vlan accel in the adapter */
2314                 err = qlge_update_hw_vlan_features(ndev, features);
2315                 if (err)
2316                         return err;
2317
2318                 qlge_vlan_mode(ndev, features);
2319         }
2320
2321         return 0;
2322 }
2323
2324 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2325 {
2326         u32 enable_bit = MAC_ADDR_E;
2327         int err;
2328
2329         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2330                                   MAC_ADDR_TYPE_VLAN, vid);
2331         if (err)
2332                 netif_err(qdev, ifup, qdev->ndev,
2333                           "Failed to init vlan address.\n");
2334         return err;
2335 }
2336
2337 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2338 {
2339         struct ql_adapter *qdev = netdev_priv(ndev);
2340         int status;
2341         int err;
2342
2343         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2344         if (status)
2345                 return status;
2346
2347         err = __qlge_vlan_rx_add_vid(qdev, vid);
2348         set_bit(vid, qdev->active_vlans);
2349
2350         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2351
2352         return err;
2353 }
2354
2355 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2356 {
2357         u32 enable_bit = 0;
2358         int err;
2359
2360         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2361                                   MAC_ADDR_TYPE_VLAN, vid);
2362         if (err)
2363                 netif_err(qdev, ifup, qdev->ndev,
2364                           "Failed to clear vlan address.\n");
2365         return err;
2366 }
2367
2368 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2369 {
2370         struct ql_adapter *qdev = netdev_priv(ndev);
2371         int status;
2372         int err;
2373
2374         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2375         if (status)
2376                 return status;
2377
2378         err = __qlge_vlan_rx_kill_vid(qdev, vid);
2379         clear_bit(vid, qdev->active_vlans);
2380
2381         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2382
2383         return err;
2384 }
2385
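     /* Re-program every VLAN ID marked active in the adapter's bitmap
      * back into the hardware.
      */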
2386 static void qlge_restore_vlan(struct ql_adapter *qdev)
2387 {
2388         int status;
2389         u16 vid;
2390
2391         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2392         if (status)
2393                 return;
2394
2395         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2396                 __qlge_vlan_rx_add_vid(qdev, vid);
2397
2398         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2399 }
2400
2401 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2402 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2403 {
2404         struct rx_ring *rx_ring = dev_id;
2405         napi_schedule(&rx_ring->napi);
2406         return IRQ_HANDLED;
2407 }
2408
2409 /* This handles a fatal error, MPI activity, and the default
2410  * rx_ring in an MSI-X multiple vector environment.
2411  * In an MSI/Legacy environment it also processes the rest of
2412  * the rx_rings.
2413  */
2414 static irqreturn_t qlge_isr(int irq, void *dev_id)
2415 {
2416         struct rx_ring *rx_ring = dev_id;
2417         struct ql_adapter *qdev = rx_ring->qdev;
2418         struct intr_context *intr_context = &qdev->intr_context[0];
2419         u32 var;
2420         int work_done = 0;
2421
2422         /* Experience shows that when using INTx interrupts, interrupts must
2423          * be masked manually.
2424          * When using MSI mode, INTR_EN_EN must be explicitly disabled
2425          * (even though it is auto-masked), otherwise a later command to
2426          * enable it is not effective.
2427          */
2428         if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2429                 ql_disable_completion_interrupt(qdev, 0);
2430
2431         var = ql_read32(qdev, STS);
2432
2433         /*
2434          * Check for fatal error.
2435          */
2436         if (var & STS_FE) {
2437                 ql_disable_completion_interrupt(qdev, 0);
2438                 ql_queue_asic_error(qdev);
2439                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2440                 var = ql_read32(qdev, ERR_STS);
2441                 netdev_err(qdev->ndev,
2442                            "Resetting chip. Error Status Register = 0x%x\n", var);
2443                 return IRQ_HANDLED;
2444         }
2445
2446         /*
2447          * Check MPI processor activity.
2448          */
2449         if ((var & STS_PI) &&
2450                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2451                 /*
2452                  * We've got an async event or mailbox completion.
2453                  * Handle it and clear the source of the interrupt.
2454                  */
2455                 netif_err(qdev, intr, qdev->ndev,
2456                           "Got MPI processor interrupt.\n");
2457                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2458                 queue_delayed_work_on(smp_processor_id(),
2459                                 qdev->workqueue, &qdev->mpi_work, 0);
2460                 work_done++;
2461         }
2462
2463         /*
2464          * Get the bit-mask that shows the active queues for this
2465          * pass.  Compare it to the queues that this irq services
2466          * and call napi if there's a match.
2467          */
2468         var = ql_read32(qdev, ISR1);
2469         if (var & intr_context->irq_mask) {
2470                 netif_info(qdev, intr, qdev->ndev,
2471                            "Waking handler for rx_ring[0].\n");
2472                 napi_schedule(&rx_ring->napi);
2473                 work_done++;
2474         } else {
2475                 /* Experience shows that the device sometimes signals an
2476                  * interrupt but no work is scheduled from this function.
2477                  * Nevertheless, the interrupt is auto-masked. Therefore, we
2478                  * systematically re-enable the interrupt if we didn't
2479                  * schedule napi.
2480                  */
2481                 ql_enable_completion_interrupt(qdev, 0);
2482         }
2483
2484         return work_done ? IRQ_HANDLED : IRQ_NONE;
2485 }
2486
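     /* Set up a TSO IOCB for GSO skbs: record header lengths and MSS and
      * seed the TCP pseudo-header checksum.  Returns 1 if TSO was set up,
      * 0 for non-GSO skbs, or a negative errno on failure.
      */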
2487 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2488 {
2490         if (skb_is_gso(skb)) {
2491                 int err;
2492                 __be16 l3_proto = vlan_get_protocol(skb);
2493
2494                 err = skb_cow_head(skb, 0);
2495                 if (err < 0)
2496                         return err;
2497
2498                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2499                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2500                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2501                 mac_iocb_ptr->total_hdrs_len =
2502                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2503                 mac_iocb_ptr->net_trans_offset =
2504                     cpu_to_le16(skb_network_offset(skb) |
2505                                 skb_transport_offset(skb)
2506                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2507                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2508                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2509                 if (likely(l3_proto == htons(ETH_P_IP))) {
2510                         struct iphdr *iph = ip_hdr(skb);
2511                         iph->check = 0;
2512                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2513                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2514                                                                  iph->daddr, 0,
2515                                                                  IPPROTO_TCP,
2516                                                                  0);
2517                 } else if (l3_proto == htons(ETH_P_IPV6)) {
2518                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2519                         tcp_hdr(skb)->check =
2520                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2521                                              &ipv6_hdr(skb)->daddr,
2522                                              0, IPPROTO_TCP, 0);
2523                 }
2524                 return 1;
2525         }
2526         return 0;
2527 }
2528
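     /* Set up hardware checksum offload for a non-TSO IPv4 TCP or UDP
      * frame: record the header offsets and preload the pseudo-header
      * checksum for the chip to complete.
      */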
2529 static void ql_hw_csum_setup(struct sk_buff *skb,
2530                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2531 {
2532         int len;
2533         struct iphdr *iph = ip_hdr(skb);
2534         __sum16 *check;
2535         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2536         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2537         mac_iocb_ptr->net_trans_offset =
2538                 cpu_to_le16(skb_network_offset(skb) |
2539                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2540
2541         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2542         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2543         if (likely(iph->protocol == IPPROTO_TCP)) {
2544                 check = &(tcp_hdr(skb)->check);
2545                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2546                 mac_iocb_ptr->total_hdrs_len =
2547                     cpu_to_le16(skb_transport_offset(skb) +
2548                                 (tcp_hdr(skb)->doff << 2));
2549         } else {
2550                 check = &(udp_hdr(skb)->check);
2551                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2552                 mac_iocb_ptr->total_hdrs_len =
2553                     cpu_to_le16(skb_transport_offset(skb) +
2554                                 sizeof(struct udphdr));
2555         }
2556         *check = ~csum_tcpudp_magic(iph->saddr,
2557                                     iph->daddr, len, iph->protocol, 0);
2558 }
2559
2560 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2561 {
2562         struct tx_ring_desc *tx_ring_desc;
2563         struct ob_mac_iocb_req *mac_iocb_ptr;
2564         struct ql_adapter *qdev = netdev_priv(ndev);
2565         int tso;
2566         struct tx_ring *tx_ring;
2567         u32 tx_ring_idx = (u32) skb->queue_mapping;
2568
2569         tx_ring = &qdev->tx_ring[tx_ring_idx];
2570
2571         if (skb_padto(skb, ETH_ZLEN))
2572                 return NETDEV_TX_OK;
2573
2574         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2575                 netif_info(qdev, tx_queued, qdev->ndev,
2576                            "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2577                            __func__, tx_ring_idx);
2578                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2579                 tx_ring->tx_errors++;
2580                 return NETDEV_TX_BUSY;
2581         }
2582         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2583         mac_iocb_ptr = tx_ring_desc->queue_entry;
2584         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2585
2586         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2587         mac_iocb_ptr->tid = tx_ring_desc->index;
2588         /* We use the upper 32-bits to store the tx queue for this IO.
2589          * When we get the completion we can use it to establish the context.
2590          */
2591         mac_iocb_ptr->txq_idx = tx_ring_idx;
2592         tx_ring_desc->skb = skb;
2593
2594         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2595
2596         if (skb_vlan_tag_present(skb)) {
2597                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2598                              "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2599                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2600                 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2601         }
2602         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2603         if (tso < 0) {
2604                 dev_kfree_skb_any(skb);
2605                 return NETDEV_TX_OK;
2606         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2607                 ql_hw_csum_setup(skb,
2608                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2609         }
2610         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2611                         NETDEV_TX_OK) {
2612                 netif_err(qdev, tx_queued, qdev->ndev,
2613                           "Could not map the segments.\n");
2614                 tx_ring->tx_errors++;
2615                 return NETDEV_TX_BUSY;
2616         }
2617         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2618         tx_ring->prod_idx++;
2619         if (tx_ring->prod_idx == tx_ring->wq_len)
2620                 tx_ring->prod_idx = 0;
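             /* Make sure the IOCB is fully written before ringing the
              * doorbell below.
              */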
2621         wmb();
2622
2623         ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2624         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2625                      "tx queued, slot %d, len %d\n",
2626                      tx_ring->prod_idx, skb->len);
2627
2628         atomic_dec(&tx_ring->tx_count);
2629
2630         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2631                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2632                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2633                         /*
2634                          * The queue got stopped because the tx_ring was full.
2635                          * Wake it up, because it's now at least 25% empty.
2636                          */
2637                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2638         }
2639         return NETDEV_TX_OK;
2640 }
2641
2642
2643 static void ql_free_shadow_space(struct ql_adapter *qdev)
2644 {
2645         if (qdev->rx_ring_shadow_reg_area) {
2646                 pci_free_consistent(qdev->pdev,
2647                                     PAGE_SIZE,
2648                                     qdev->rx_ring_shadow_reg_area,
2649                                     qdev->rx_ring_shadow_reg_dma);
2650                 qdev->rx_ring_shadow_reg_area = NULL;
2651         }
2652         if (qdev->tx_ring_shadow_reg_area) {
2653                 pci_free_consistent(qdev->pdev,
2654                                     PAGE_SIZE,
2655                                     qdev->tx_ring_shadow_reg_area,
2656                                     qdev->tx_ring_shadow_reg_dma);
2657                 qdev->tx_ring_shadow_reg_area = NULL;
2658         }
2659 }
2660
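/* Allocate one DMA-coherent page each for the rx and tx shadow register
 * areas.  These hold the producer/consumer indexes that the chip updates
 * for the driver and, on the rx side, the buffer queue indirection tables.
 */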
2661 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2662 {
2663         qdev->rx_ring_shadow_reg_area =
2664                 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2665                                       &qdev->rx_ring_shadow_reg_dma);
2666         if (!qdev->rx_ring_shadow_reg_area) {
2667                 netif_err(qdev, ifup, qdev->ndev,
2668                           "Allocation of RX shadow space failed.\n");
2669                 return -ENOMEM;
2670         }
2671
2672         qdev->tx_ring_shadow_reg_area =
2673                 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2674                                       &qdev->tx_ring_shadow_reg_dma);
2675         if (!qdev->tx_ring_shadow_reg_area) {
2676                 netif_err(qdev, ifup, qdev->ndev,
2677                           "Allocation of TX shadow space failed.\n");
2678                 goto err_wqp_sh_area;
2679         }
2680         return 0;
2681
2682 err_wqp_sh_area:
2683         pci_free_consistent(qdev->pdev,
2684                             PAGE_SIZE,
2685                             qdev->rx_ring_shadow_reg_area,
2686                             qdev->rx_ring_shadow_reg_dma);
2687         return -ENOMEM;
2688 }
2689
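/* (Re)initialize the software descriptors for a tx ring: point each
 * tx_ring_desc at its IOCB slot in the work queue and reset the count
 * of free entries.
 */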
2690 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2691 {
2692         struct tx_ring_desc *tx_ring_desc;
2693         int i;
2694         struct ob_mac_iocb_req *mac_iocb_ptr;
2695
2696         mac_iocb_ptr = tx_ring->wq_base;
2697         tx_ring_desc = tx_ring->q;
2698         for (i = 0; i < tx_ring->wq_len; i++) {
2699                 tx_ring_desc->index = i;
2700                 tx_ring_desc->skb = NULL;
2701                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2702                 mac_iocb_ptr++;
2703                 tx_ring_desc++;
2704         }
2705         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2706 }
2707
2708 static void ql_free_tx_resources(struct ql_adapter *qdev,
2709                                  struct tx_ring *tx_ring)
2710 {
2711         if (tx_ring->wq_base) {
2712                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2713                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2714                 tx_ring->wq_base = NULL;
2715         }
2716         kfree(tx_ring->q);
2717         tx_ring->q = NULL;
2718 }
2719
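/* Allocate the DMA work queue for a tx ring plus the driver-side array of
 * tx_ring_desc entries that shadow it.  The work queue base address must
 * satisfy the WQ_ADDR_ALIGN alignment requirement.
 */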
2720 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2721                                  struct tx_ring *tx_ring)
2722 {
2723         tx_ring->wq_base =
2724             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2725                                  &tx_ring->wq_base_dma);
2726
2727         if (!tx_ring->wq_base ||
2728             tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2729                 goto pci_alloc_err;
2730
2731         tx_ring->q =
2732             kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2733                           GFP_KERNEL);
2734         if (!tx_ring->q)
2735                 goto err;
2736
2737         return 0;
2738 err:
2739         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2740                             tx_ring->wq_base, tx_ring->wq_base_dma);
2741         tx_ring->wq_base = NULL;
2742 pci_alloc_err:
2743         netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2744         return -ENOMEM;
2745 }
2746
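/* Return all outstanding large-buffer (page chunk) entries for a ring.
 * Pages are mapped once per block, so only the chunk at the last offset
 * in a block triggers the unmap; every chunk drops its page reference.
 * The partially used master chunk, if any, is also unmapped and released.
 */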
2747 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2748 {
2749         struct qlge_bq *lbq = &rx_ring->lbq;
2750         unsigned int last_offset;
2751
2752         last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
2753         while (lbq->next_to_clean != lbq->next_to_use) {
2754                 struct qlge_bq_desc *lbq_desc =
2755                         &lbq->queue[lbq->next_to_clean];
2756
2757                 if (lbq_desc->p.pg_chunk.offset == last_offset)
2758                         pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
2759                                        ql_lbq_block_size(qdev),
2760                                        PCI_DMA_FROMDEVICE);
2761                 put_page(lbq_desc->p.pg_chunk.page);
2762
2763                 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2764         }
2765
2766         if (rx_ring->master_chunk.page) {
2767                 pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
2768                                ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2769                 put_page(rx_ring->master_chunk.page);
2770                 rx_ring->master_chunk.page = NULL;
2771         }
2772 }
2773
2774 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2775 {
2776         int i;
2777
2778         for (i = 0; i < QLGE_BQ_LEN; i++) {
2779                 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2780
2781                 if (!sbq_desc) {
2782                         netif_err(qdev, ifup, qdev->ndev,
2783                                   "sbq_desc %d is NULL.\n", i);
2784                         return;
2785                 }
2786                 if (sbq_desc->p.skb) {
2787                         pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
2788                                          SMALL_BUF_MAP_SIZE,
2789                                          PCI_DMA_FROMDEVICE);
2790                         dev_kfree_skb(sbq_desc->p.skb);
2791                         sbq_desc->p.skb = NULL;
2792                 }
2793         }
2794 }
2795
2796 /* Free all large and small rx buffers associated
2797  * with the completion queues for this device.
2798  */
2799 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2800 {
2801         int i;
2802
2803         for (i = 0; i < qdev->rx_ring_count; i++) {
2804                 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2805
2806                 if (rx_ring->lbq.queue)
2807                         ql_free_lbq_buffers(qdev, rx_ring);
2808                 if (rx_ring->sbq.queue)
2809                         ql_free_sbq_buffers(qdev, rx_ring);
2810         }
2811 }
2812
2813 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2814 {
2815         int i;
2816
2817         for (i = 0; i < qdev->rss_ring_count; i++)
2818                 ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2819                                         HZ / 2);
2820 }
2821
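/* Allocate a buffer queue (small or large): a DMA ring of 64-bit buffer
 * address entries for the chip, plus a driver-side qlge_bq_desc array
 * that tracks the skb or page chunk backing each entry.
 */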
2822 static int qlge_init_bq(struct qlge_bq *bq)
2823 {
2824         struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2825         struct ql_adapter *qdev = rx_ring->qdev;
2826         struct qlge_bq_desc *bq_desc;
2827         __le64 *buf_ptr;
2828         int i;
2829
2830         bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
2831                                         &bq->base_dma);
2832         if (!bq->base) {
2833                 netif_err(qdev, ifup, qdev->ndev,
2834                           "ring %u %s allocation failed.\n", rx_ring->cq_id,
2835                           bq_type_name[bq->type]);
2836                 return -ENOMEM;
2837         }
2838
2839         bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2840                                   GFP_KERNEL);
2841         if (!bq->queue)
2842                 return -ENOMEM;
2843
2844         buf_ptr = bq->base;
2845         bq_desc = &bq->queue[0];
2846         for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2847                 bq_desc->p.skb = NULL;
2848                 bq_desc->index = i;
2849                 bq_desc->buf_ptr = buf_ptr;
2850         }
2851
2852         return 0;
2853 }
2854
2855 static void ql_free_rx_resources(struct ql_adapter *qdev,
2856                                  struct rx_ring *rx_ring)
2857 {
2858         /* Free the small buffer queue. */
2859         if (rx_ring->sbq.base) {
2860                 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2861                                     rx_ring->sbq.base, rx_ring->sbq.base_dma);
2862                 rx_ring->sbq.base = NULL;
2863         }
2864
2865         /* Free the small buffer queue control blocks. */
2866         kfree(rx_ring->sbq.queue);
2867         rx_ring->sbq.queue = NULL;
2868
2869         /* Free the large buffer queue. */
2870         if (rx_ring->lbq.base) {
2871                 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2872                                     rx_ring->lbq.base, rx_ring->lbq.base_dma);
2873                 rx_ring->lbq.base = NULL;
2874         }
2875
2876         /* Free the large buffer queue control blocks. */
2877         kfree(rx_ring->lbq.queue);
2878         rx_ring->lbq.queue = NULL;
2879
2880         /* Free the rx queue. */
2881         if (rx_ring->cq_base) {
2882                 pci_free_consistent(qdev->pdev,
2883                                     rx_ring->cq_size,
2884                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2885                 rx_ring->cq_base = NULL;
2886         }
2887 }
2888
2889 /* Allocate queues and buffers for this completion queue based
2890  * on the values in the parameter structure. */
2891 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2892                                  struct rx_ring *rx_ring)
2893 {
2894
2895         /*
2896          * Allocate the completion queue for this rx_ring.
2897          */
2898         rx_ring->cq_base =
2899             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2900                                  &rx_ring->cq_base_dma);
2901
2902         if (!rx_ring->cq_base) {
2903                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2904                 return -ENOMEM;
2905         }
2906
2907         if (rx_ring->cq_id < qdev->rss_ring_count &&
2908             (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2909                 ql_free_rx_resources(qdev, rx_ring);
2910                 return -ENOMEM;
2911         }
2912
2913         return 0;
2914 }
2915
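/* Walk every tx ring and release any skbs that were queued but never
 * completed, unmapping their DMA segments so nothing is leaked across
 * a reset or ifdown.
 */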
2916 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2917 {
2918         struct tx_ring *tx_ring;
2919         struct tx_ring_desc *tx_ring_desc;
2920         int i, j;
2921
2922         /*
2923          * Loop through all queues and free
2924          * any resources.
2925          */
2926         for (j = 0; j < qdev->tx_ring_count; j++) {
2927                 tx_ring = &qdev->tx_ring[j];
2928                 for (i = 0; i < tx_ring->wq_len; i++) {
2929                         tx_ring_desc = &tx_ring->q[i];
2930                         if (tx_ring_desc && tx_ring_desc->skb) {
2931                                 netif_err(qdev, ifdown, qdev->ndev,
2932                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2933                                           tx_ring_desc->skb, j,
2934                                           tx_ring_desc->index);
2935                                 ql_unmap_send(qdev, tx_ring_desc,
2936                                               tx_ring_desc->map_cnt);
2937                                 dev_kfree_skb(tx_ring_desc->skb);
2938                                 tx_ring_desc->skb = NULL;
2939                         }
2940                 }
2941         }
2942 }
2943
2944 static void ql_free_mem_resources(struct ql_adapter *qdev)
2945 {
2946         int i;
2947
2948         for (i = 0; i < qdev->tx_ring_count; i++)
2949                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2950         for (i = 0; i < qdev->rx_ring_count; i++)
2951                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2952         ql_free_shadow_space(qdev);
2953 }
2954
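/* Allocate everything the data path needs: the shadow register space,
 * then the rx (completion/buffer) and tx (work queue) ring resources.
 * On any failure, everything allocated so far is freed.
 */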
2955 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2956 {
2957         int i;
2958
2959         /* Allocate space for our shadow registers and such. */
2960         if (ql_alloc_shadow_space(qdev))
2961                 return -ENOMEM;
2962
2963         for (i = 0; i < qdev->rx_ring_count; i++) {
2964                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2965                         netif_err(qdev, ifup, qdev->ndev,
2966                                   "RX resource allocation failed.\n");
2967                         goto err_mem;
2968                 }
2969         }
2970         /* Allocate tx queue resources */
2971         for (i = 0; i < qdev->tx_ring_count; i++) {
2972                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2973                         netif_err(qdev, ifup, qdev->ndev,
2974                                   "TX resource allocation failed.\n");
2975                         goto err_mem;
2976                 }
2977         }
2978         return 0;
2979
2980 err_mem:
2981         ql_free_mem_resources(qdev);
2982         return -ENOMEM;
2983 }
2984
2985 /* Set up the rx ring control block and pass it to the chip.
2986  * The control block is defined as
2987  * "Completion Queue Initialization Control Block", or cqicb.
2988  */
2989 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2990 {
2991         struct cqicb *cqicb = &rx_ring->cqicb;
2992         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2993                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2994         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2995                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2996         void __iomem *doorbell_area =
2997             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2998         int err = 0;
2999         u64 tmp;
3000         __le64 *base_indirect_ptr;
3001         int page_entries;
3002
3003         /* Set up the shadow registers for this ring. */
3004         rx_ring->prod_idx_sh_reg = shadow_reg;
3005         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3006         *rx_ring->prod_idx_sh_reg = 0;
3007         shadow_reg += sizeof(u64);
3008         shadow_reg_dma += sizeof(u64);
3009         rx_ring->lbq.base_indirect = shadow_reg;
3010         rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
3011         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3012         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3013         rx_ring->sbq.base_indirect = shadow_reg;
3014         rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
3015
3016         /* PCI doorbell mem area + 0x00 for consumer index register */
3017         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3018         rx_ring->cnsmr_idx = 0;
3019         rx_ring->curr_entry = rx_ring->cq_base;
3020
3021         /* PCI doorbell mem area + 0x04 for valid register */
3022         rx_ring->valid_db_reg = doorbell_area + 0x04;
3023
3024         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3025         rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
3026
3027         /* PCI doorbell mem area + 0x1c */
3028         rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
3029
3030         memset((void *)cqicb, 0, sizeof(struct cqicb));
3031         cqicb->msix_vect = rx_ring->irq;
3032
3033         cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3034                                  LEN_CPP_CONT);
3035
3036         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3037
3038         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3039
3040         /*
3041          * Set up the control block load flags.
3042          */
3043         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3044             FLAGS_LV |          /* Load MSI-X vector */
3045             FLAGS_LI;           /* Load irq delay values */
3046         if (rx_ring->cq_id < qdev->rss_ring_count) {
3047                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3048                 tmp = (u64)rx_ring->lbq.base_dma;
3049                 base_indirect_ptr = rx_ring->lbq.base_indirect;
3050                 page_entries = 0;
3051                 do {
3052                         *base_indirect_ptr = cpu_to_le64(tmp);
3053                         tmp += DB_PAGE_SIZE;
3054                         base_indirect_ptr++;
3055                         page_entries++;
3056                 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3057                 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3058                 cqicb->lbq_buf_size =
3059                         cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3060                 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3061                 rx_ring->lbq.next_to_use = 0;
3062                 rx_ring->lbq.next_to_clean = 0;
3063
3064                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3065                 tmp = (u64)rx_ring->sbq.base_dma;
3066                 base_indirect_ptr = rx_ring->sbq.base_indirect;
3067                 page_entries = 0;
3068                 do {
3069                         *base_indirect_ptr = cpu_to_le64(tmp);
3070                         tmp += DB_PAGE_SIZE;
3071                         base_indirect_ptr++;
3072                         page_entries++;
3073                 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3074                 cqicb->sbq_addr =
3075                     cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3076                 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3077                 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3078                 rx_ring->sbq.next_to_use = 0;
3079                 rx_ring->sbq.next_to_clean = 0;
3080         }
3081         if (rx_ring->cq_id < qdev->rss_ring_count) {
3082                 /* Inbound completion handling rx_rings run in
3083                  * separate NAPI contexts.
3084                  */
3085                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3086                                64);
3087                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3088                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3089         } else {
3090                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3091                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3092         }
3093         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3094                            CFG_LCQ, rx_ring->cq_id);
3095         if (err) {
3096                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3097                 return err;
3098         }
3099         return err;
3100 }
3101
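/* Set up a tx ring's doorbell and shadow registers, initialize its
 * software descriptors, then build the Work Queue Initialization Control
 * Block (wqicb) and download it to the chip.
 */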
3102 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3103 {
3104         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3105         void __iomem *doorbell_area =
3106             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3107         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3108             (tx_ring->wq_id * sizeof(u64));
3109         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3110             (tx_ring->wq_id * sizeof(u64));
3111         int err = 0;
3112
3113         /*
3114          * Assign doorbell registers for this tx_ring.
3115          */
3116         /* TX PCI doorbell mem area for tx producer index */
3117         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3118         tx_ring->prod_idx = 0;
3119         /* TX PCI doorbell mem area + 0x04 */
3120         tx_ring->valid_db_reg = doorbell_area + 0x04;
3121
3122         /*
3123          * Assign shadow registers for this tx_ring.
3124          */
3125         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3126         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3127
3128         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3129         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3130                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3131         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3132         wqicb->rid = 0;
3133         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3134
3135         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3136
3137         ql_init_tx_ring(qdev, tx_ring);
3138
3139         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3140                            (u16) tx_ring->wq_id);
3141         if (err) {
3142                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3143                 return err;
3144         }
3145         return err;
3146 }
3147
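/* Undo whichever interrupt mode was enabled: release the MSI-X vectors
 * and their entry table, or disable MSI.
 */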
3148 static void ql_disable_msix(struct ql_adapter *qdev)
3149 {
3150         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3151                 pci_disable_msix(qdev->pdev);
3152                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3153                 kfree(qdev->msi_x_entry);
3154                 qdev->msi_x_entry = NULL;
3155         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3156                 pci_disable_msi(qdev->pdev);
3157                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3158         }
3159 }
3160
3161 /* We start by asking for the number of vectors stored in
3162  * qdev->intr_count.  If we can't get that many, the PCI core gives
3163  * us as many as it can and we update intr_count accordingly.
3164  */
3165 static void ql_enable_msix(struct ql_adapter *qdev)
3166 {
3167         int i, err;
3168
3169         /* Get the MSIX vectors. */
3170         if (qlge_irq_type == MSIX_IRQ) {
3171                 /* Try to alloc space for the msix struct,
3172                  * if it fails then go to MSI/legacy.
3173                  */
3174                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3175                                             sizeof(struct msix_entry),
3176                                             GFP_KERNEL);
3177                 if (!qdev->msi_x_entry) {
3178                         qlge_irq_type = MSI_IRQ;
3179                         goto msi;
3180                 }
3181
3182                 for (i = 0; i < qdev->intr_count; i++)
3183                         qdev->msi_x_entry[i].entry = i;
3184
3185                 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3186                                             1, qdev->intr_count);
3187                 if (err < 0) {
3188                         kfree(qdev->msi_x_entry);
3189                         qdev->msi_x_entry = NULL;
3190                         netif_warn(qdev, ifup, qdev->ndev,
3191                                    "MSI-X Enable failed, trying MSI.\n");
3192                         qlge_irq_type = MSI_IRQ;
3193                 } else {
3194                         qdev->intr_count = err;
3195                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3196                         netif_info(qdev, ifup, qdev->ndev,
3197                                    "MSI-X Enabled, got %d vectors.\n",
3198                                    qdev->intr_count);
3199                         return;
3200                 }
3201         }
3202 msi:
3203         qdev->intr_count = 1;
3204         if (qlge_irq_type == MSI_IRQ) {
3205                 if (!pci_enable_msi(qdev->pdev)) {
3206                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3207                         netif_info(qdev, ifup, qdev->ndev,
3208                                    "Running with MSI interrupts.\n");
3209                         return;
3210                 }
3211         }
3212         qlge_irq_type = LEG_IRQ;
3213         set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3214         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3215                      "Running with legacy interrupts.\n");
3216 }
3217
3218 /* Each vector services 1 RSS ring and 1 or more
3219  * TX completion rings.  This function loops through
3220  * the TX completion rings and assigns the vector that
3221  * will service it.  An example would be if there are
3222  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3223  * This would mean that vector 0 would service RSS ring 0
3224  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3225  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3226  */
3227 static void ql_set_tx_vect(struct ql_adapter *qdev)
3228 {
3229         int i, j, vect;
3230         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3231
3232         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3233                 /* Assign irq vectors to the TX completion rx_rings. */
3234                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3235                                          i < qdev->rx_ring_count; i++) {
3236                         if (j == tx_rings_per_vector) {
3237                                 vect++;
3238                                 j = 0;
3239                         }
3240                         qdev->rx_ring[i].irq = vect;
3241                         j++;
3242                 }
3243         } else {
3244                 /* For a single vector, all rings have an irq
3245                  * of zero.
3246                  */
3247                 for (i = 0; i < qdev->rx_ring_count; i++)
3248                         qdev->rx_ring[i].irq = 0;
3249         }
3250 }
3251
3252 /* Set the interrupt mask for this vector.  Each vector
3253  * will service 1 RSS ring and 1 or more TX completion
3254  * rings.  This function sets up a bit mask per vector
3255  * that indicates which rings it services.
3256  */
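/* For example, with 2 MSI-X vectors and 8 TX completion rings
 * (tx_rings_per_vector = 4), vector 0's mask has bits set for RSS cq 0
 * and TX completion cqs 2-5, while vector 1's mask covers RSS cq 1 and
 * cqs 6-9.
 */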
3257 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3258 {
3259         int j, vect = ctx->intr;
3260         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3261
3262         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3263                 /* Add the RSS ring serviced by this vector
3264                  * to the mask.
3265                  */
3266                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3267                 /* Add the TX ring(s) serviced by this vector
3268                  * to the mask. */
3269                 for (j = 0; j < tx_rings_per_vector; j++) {
3270                         ctx->irq_mask |=
3271                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3272                         (vect * tx_rings_per_vector) + j].cq_id);
3273                 }
3274         } else {
3275                 /* For a single vector, set the bit for each queue's
3276                  * ID in the mask.
3277                  */
3278                 for (j = 0; j < qdev->rx_ring_count; j++)
3279                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3280         }
3281 }
3282
3283 /*
3284  * Here we build the intr_context structures based on
3285  * our rx_ring count and intr vector count.
3286  * The intr_context structure is used to hook each vector
3287  * to possibly different handlers.
3288  */
3289 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3290 {
3291         int i = 0;
3292         struct intr_context *intr_context = &qdev->intr_context[0];
3293
3294         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3295                 /* Each rx_ring has its
3296                  * own intr_context since we have separate
3297                  * vectors for each queue.
3298                  */
3299                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3300                         qdev->rx_ring[i].irq = i;
3301                         intr_context->intr = i;
3302                         intr_context->qdev = qdev;
3303                         /* Set up this vector's bit-mask that indicates
3304                          * which queues it services.
3305                          */
3306                         ql_set_irq_mask(qdev, intr_context);
3307                         /*
3308                          * We set up each vector's enable/disable/read bits so
3309                          * there are no bit/mask calculations in the critical path.
3310                          */
3311                         intr_context->intr_en_mask =
3312                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3313                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3314                             | i;
3315                         intr_context->intr_dis_mask =
3316                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3317                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3318                             INTR_EN_IHD | i;
3319                         intr_context->intr_read_mask =
3320                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3321                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3322                             i;
3323                         if (i == 0) {
3324                                 /* The first vector/queue handles
3325                                  * broadcast/multicast, fatal errors,
3326                                  * and firmware events.  This is in addition
3327                                  * to normal inbound NAPI processing.
3328                                  */
3329                                 intr_context->handler = qlge_isr;
3330                                 sprintf(intr_context->name, "%s-rx-%d",
3331                                         qdev->ndev->name, i);
3332                         } else {
3333                                 /*
3334                                  * Inbound queues handle unicast frames only.
3335                                  */
3336                                 intr_context->handler = qlge_msix_rx_isr;
3337                                 sprintf(intr_context->name, "%s-rx-%d",
3338                                         qdev->ndev->name, i);
3339                         }
3340                 }
3341         } else {
3342                 /*
3343                  * All rx_rings use the same intr_context since
3344                  * there is only one vector.
3345                  */
3346                 intr_context->intr = 0;
3347                 intr_context->qdev = qdev;
3348                 /*
3349                  * We set up each vector's enable/disable/read bits so
3350                  * there are no bit/mask calculations in the critical path.
3351                  */
3352                 intr_context->intr_en_mask =
3353                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3354                 intr_context->intr_dis_mask =
3355                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3356                     INTR_EN_TYPE_DISABLE;
3357                 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3358                         /* Experience shows that when using INTx interrupts,
3359                          * the device does not always auto-mask INTR_EN_EN.
3360                          * Moreover, masking INTR_EN_EN manually does not
3361                          * immediately prevent interrupt generation.
3362                          */
3363                         intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3364                                 INTR_EN_EI;
3365                         intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3366                 }
3367                 intr_context->intr_read_mask =
3368                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3369                 /*
3370                  * Single interrupt means one handler for all rings.
3371                  */
3372                 intr_context->handler = qlge_isr;
3373                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3374                 /* Set up this vector's bit-mask that indicates
3375                  * which queues it services. In this case there is
3376                  * a single vector so it will service all RSS and
3377                  * TX completion rings.
3378                  */
3379                 ql_set_irq_mask(qdev, intr_context);
3380         }
3381         /* Tell the TX completion rings which MSI-X vector
3382          * they will be using.
3383          */
3384         ql_set_tx_vect(qdev);
3385 }
3386
3387 static void ql_free_irq(struct ql_adapter *qdev)
3388 {
3389         int i;
3390         struct intr_context *intr_context = &qdev->intr_context[0];
3391
3392         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3393                 if (intr_context->hooked) {
3394                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3395                                 free_irq(qdev->msi_x_entry[i].vector,
3396                                          &qdev->rx_ring[i]);
3397                         } else {
3398                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3399                         }
3400                 }
3401         }
3402         ql_disable_msix(qdev);
3403 }
3404
3405 static int ql_request_irq(struct ql_adapter *qdev)
3406 {
3407         int i;
3408         int status = 0;
3409         struct pci_dev *pdev = qdev->pdev;
3410         struct intr_context *intr_context = &qdev->intr_context[0];
3411
3412         ql_resolve_queues_to_irqs(qdev);
3413
3414         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3415                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3416                         status = request_irq(qdev->msi_x_entry[i].vector,
3417                                              intr_context->handler,
3418                                              0,
3419                                              intr_context->name,
3420                                              &qdev->rx_ring[i]);
3421                         if (status) {
3422                                 netif_err(qdev, ifup, qdev->ndev,
3423                                           "Failed request for MSIX interrupt %d.\n",
3424                                           i);
3425                                 goto err_irq;
3426                         }
3427                 } else {
3428                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3429                                      "trying msi or legacy interrupts.\n");
3430                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3431                                      "%s: irq = %d.\n", __func__, pdev->irq);
3432                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3433                                      "%s: context->name = %s.\n", __func__,
3434                                      intr_context->name);
3435                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3436                                      "%s: dev_id = 0x%p.\n", __func__,
3437                                      &qdev->rx_ring[0]);
3438                         status =
3439                             request_irq(pdev->irq, qlge_isr,
3440                                         test_bit(QL_MSI_ENABLED,
3441                                                  &qdev->
3442                                                  flags) ? 0 : IRQF_SHARED,
3443                                         intr_context->name, &qdev->rx_ring[0]);
3444                         if (status)
3445                                 goto err_irq;
3446
3447                         netif_err(qdev, ifup, qdev->ndev,
3448                                   "Hooked intr 0, queue type RX_Q, with name %s.\n",
3449                                   intr_context->name);
3450                 }
3451                 intr_context->hooked = 1;
3452         }
3453         return status;
3454 err_irq:
3455         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3456         ql_free_irq(qdev);
3457         return status;
3458 }
3459
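/* Configure receive-side scaling: build the RICB with a 1024-entry
 * indirection table spread across the RSS rings and the IPv4/IPv6 hash
 * keys, then download it to the chip.
 */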
3460 static int ql_start_rss(struct ql_adapter *qdev)
3461 {
3462         static const u8 init_hash_seed[] = {
3463                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3464                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3465                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3466                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3467                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3468         };
3469         struct ricb *ricb = &qdev->ricb;
3470         int status = 0;
3471         int i;
3472         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3473
3474         memset((void *)ricb, 0, sizeof(*ricb));
3475
3476         ricb->base_cq = RSS_L4K;
3477         ricb->flags =
3478                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3479         ricb->mask = cpu_to_le16((u16)(0x3ff));
3480
3481         /*
3482          * Fill out the Indirection Table.
3483          */
3484         for (i = 0; i < 1024; i++)
3485                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3486
3487         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3488         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3489
3490         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3491         if (status) {
3492                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3493                 return status;
3494         }
3495         return status;
3496 }
3497
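/* Zero all 16 routing table entries under the routing index semaphore so
 * no stale frame-routing rules survive a reset.
 */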
3498 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3499 {
3500         int i, status = 0;
3501
3502         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3503         if (status)
3504                 return status;
3505         /* Clear all the entries in the routing table. */
3506         for (i = 0; i < 16; i++) {
3507                 status = ql_set_routing_reg(qdev, i, 0, 0);
3508                 if (status) {
3509                         netif_err(qdev, ifup, qdev->ndev,
3510                                   "Failed to init routing register for CAM packets.\n");
3511                         break;
3512                 }
3513         }
3514         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3515         return status;
3516 }
3517
3518 /* Initialize the frame-to-queue routing. */
3519 static int ql_route_initialize(struct ql_adapter *qdev)
3520 {
3521         int status = 0;
3522
3523         /* Clear all the entries in the routing table. */
3524         status = ql_clear_routing_entries(qdev);
3525         if (status)
3526                 return status;
3527
3528         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3529         if (status)
3530                 return status;
3531
3532         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3533                                                 RT_IDX_IP_CSUM_ERR, 1);
3534         if (status) {
3535                 netif_err(qdev, ifup, qdev->ndev,
3536                         "Failed to init routing register "
3537                         "for IP CSUM error packets.\n");
3538                 goto exit;
3539         }
3540         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3541                                                 RT_IDX_TU_CSUM_ERR, 1);
3542         if (status) {
3543                 netif_err(qdev, ifup, qdev->ndev,
3544                         "Failed to init routing register "
3545                         "for TCP/UDP CSUM error packets.\n");
3546                 goto exit;
3547         }
3548         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3549         if (status) {
3550                 netif_err(qdev, ifup, qdev->ndev,
3551                           "Failed to init routing register for broadcast packets.\n");
3552                 goto exit;
3553         }
3554         /* If we have more than one inbound queue, then turn on RSS in the
3555          * routing block.
3556          */
3557         if (qdev->rss_ring_count > 1) {
3558                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3559                                         RT_IDX_RSS_MATCH, 1);
3560                 if (status) {
3561                         netif_err(qdev, ifup, qdev->ndev,
3562                                   "Failed to init routing register for MATCH RSS packets.\n");
3563                         goto exit;
3564                 }
3565         }
3566
3567         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3568                                     RT_IDX_CAM_HIT, 1);
3569         if (status)
3570                 netif_err(qdev, ifup, qdev->ndev,
3571                           "Failed to init routing register for CAM packets.\n");
3572 exit:
3573         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3574         return status;
3575 }
3576
3577 int ql_cam_route_initialize(struct ql_adapter *qdev)
3578 {
3579         int status, set;
3580
3581         /* Check if the link is up and use that to
3582          * determine whether we are setting or clearing
3583          * the MAC address in the CAM.
3584          */
3585         set = ql_read32(qdev, STS);
3586         set &= qdev->port_link_up;
3587         status = ql_set_mac_addr(qdev, set);
3588         if (status) {
3589                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3590                 return status;
3591         }
3592
3593         status = ql_route_initialize(qdev);
3594         if (status)
3595                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3596
3597         return status;
3598 }
3599
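/* Bring the hardware to an operational state: program the global control
 * registers, start every rx and tx ring, enable RSS when more than one
 * inbound queue exists, initialize the port and the CAM/routing tables,
 * and finally enable NAPI on the RSS rings.
 */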
3600 static int ql_adapter_initialize(struct ql_adapter *qdev)
3601 {
3602         u32 value, mask;
3603         int i;
3604         int status = 0;
3605
3606         /*
3607          * Set up the System register to halt on errors.
3608          */
3609         value = SYS_EFE | SYS_FAE;
3610         mask = value << 16;
3611         ql_write32(qdev, SYS, mask | value);
3612
3613         /* Set the default queue, and VLAN behavior. */
3614         value = NIC_RCV_CFG_DFQ;
3615         mask = NIC_RCV_CFG_DFQ_MASK;
3616         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3617                 value |= NIC_RCV_CFG_RV;
3618                 mask |= (NIC_RCV_CFG_RV << 16);
3619         }
3620         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3621
3622         /* Set the MPI interrupt to enabled. */
3623         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3624
3625         /* Enable the function, set pagesize, enable error checking. */
3626         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3627             FSC_EC | FSC_VM_PAGE_4K;
3628         value |= SPLT_SETTING;
3629
3630         /* Set/clear header splitting. */
3631         mask = FSC_VM_PAGESIZE_MASK |
3632             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3633         ql_write32(qdev, FSC, mask | value);
3634
3635         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3636
3637         /* Set RX packet routing to use the port/PCI function on which the
3638          * packet arrived, in addition to the usual frame routing.
3639          * This is helpful on bonding where both interfaces can have
3640          * the same MAC address.
3641          */
3642         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3643         /* Reroute all packets to our Interface.
3644          * They may have been routed to MPI firmware
3645          * due to WOL.
3646          */
3647         value = ql_read32(qdev, MGMT_RCV_CFG);
3648         value &= ~MGMT_RCV_CFG_RM;
3649         mask = 0xffff0000;
3650
3651         /* Sticky reg needs clearing due to WOL. */
3652         ql_write32(qdev, MGMT_RCV_CFG, mask);
3653         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3654
3655         /* Default WOL is enabled on Mezz cards */
3656         if (qdev->pdev->subsystem_device == 0x0068 ||
3657                         qdev->pdev->subsystem_device == 0x0180)
3658                 qdev->wol = WAKE_MAGIC;
3659
3660         /* Start up the rx queues. */
3661         for (i = 0; i < qdev->rx_ring_count; i++) {
3662                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3663                 if (status) {
3664                         netif_err(qdev, ifup, qdev->ndev,
3665                                   "Failed to start rx ring[%d].\n", i);
3666                         return status;
3667                 }
3668         }
3669
3670         /* If there is more than one inbound completion queue
3671          * then download a RICB to configure RSS.
3672          */
3673         if (qdev->rss_ring_count > 1) {
3674                 status = ql_start_rss(qdev);
3675                 if (status) {
3676                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3677                         return status;
3678                 }
3679         }
3680
3681         /* Start up the tx queues. */
3682         for (i = 0; i < qdev->tx_ring_count; i++) {
3683                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3684                 if (status) {
3685                         netif_err(qdev, ifup, qdev->ndev,
3686                                   "Failed to start tx ring[%d].\n", i);
3687                         return status;
3688                 }
3689         }
3690
3691         /* Initialize the port and set the max framesize. */
3692         status = qdev->nic_ops->port_initialize(qdev);
3693         if (status)
3694                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3695
3696         /* Set up the MAC address and frame routing filter. */
3697         status = ql_cam_route_initialize(qdev);
3698         if (status) {
3699                 netif_err(qdev, ifup, qdev->ndev,
3700                           "Failed to init CAM/Routing tables.\n");
3701                 return status;
3702         }
3703
3704         /* Start NAPI for the RSS queues. */
3705         for (i = 0; i < qdev->rss_ring_count; i++)
3706                 napi_enable(&qdev->rx_ring[i].napi);
3707
3708         return status;
3709 }
3710
3711 /* Issue soft reset to chip. */
3712 static int ql_adapter_reset(struct ql_adapter *qdev)
3713 {
3714         u32 value;
3715         int status = 0;
3716         unsigned long end_jiffies;
3717
3718         /* Clear all the entries in the routing table. */
3719         status = ql_clear_routing_entries(qdev);
3720         if (status) {
3721                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3722                 return status;
3723         }
3724
3725         /* If the QL_ASIC_RECOVERY bit is set, skip the mailbox command and
3726          * clear the bit; otherwise we are in the normal reset process.
3727          */
3728         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3729                 /* Stop management traffic. */
3730                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3731
3732                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3733                 ql_wait_fifo_empty(qdev);
3734         } else
3735                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3736
3737         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3738
3739         end_jiffies = jiffies + usecs_to_jiffies(30);
3740         do {
3741                 value = ql_read32(qdev, RST_FO);
3742                 if ((value & RST_FO_FR) == 0)
3743                         break;
3744                 cpu_relax();
3745         } while (time_before(jiffies, end_jiffies));
3746
3747         if (value & RST_FO_FR) {
3748                 netif_err(qdev, ifdown, qdev->ndev,
3749                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3750                 status = -ETIMEDOUT;
3751         }
3752
3753         /* Resume management traffic. */
3754         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3755         return status;
3756 }
3757
3758 static void ql_display_dev_info(struct net_device *ndev)
3759 {
3760         struct ql_adapter *qdev = netdev_priv(ndev);
3761
3762         netif_info(qdev, probe, qdev->ndev,
3763                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3764                    "XG Roll = %d, XG Rev = %d.\n",
3765                    qdev->func,
3766                    qdev->port,
3767                    qdev->chip_rev_id & 0x0000000f,
3768                    qdev->chip_rev_id >> 4 & 0x0000000f,
3769                    qdev->chip_rev_id >> 8 & 0x0000000f,
3770                    qdev->chip_rev_id >> 12 & 0x0000000f);
3771         netif_info(qdev, probe, qdev->ndev,
3772                    "MAC address %pM\n", ndev->dev_addr);
3773 }
3774
3775 static int ql_wol(struct ql_adapter *qdev)
3776 {
3777         int status = 0;
3778         u32 wol = MB_WOL_DISABLE;
3779
3780         /* The CAM is still intact after a reset, but if we
3781          * are doing WOL, then we may need to program the
3782          * routing regs. We would also need to issue the mailbox
3783          * commands to instruct the MPI what to do per the ethtool
3784          * settings.
3785          */
3786
3787         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3788                         WAKE_MCAST | WAKE_BCAST)) {
3789                 netif_err(qdev, ifdown, qdev->ndev,
3790                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3791                           qdev->wol);
3792                 return -EINVAL;
3793         }
3794
3795         if (qdev->wol & WAKE_MAGIC) {
3796                 status = ql_mb_wol_set_magic(qdev, 1);
3797                 if (status) {
3798                         netif_err(qdev, ifdown, qdev->ndev,
3799                                   "Failed to set magic packet on %s.\n",
3800                                   qdev->ndev->name);
3801                         return status;
3802                 } else
3803                         netif_info(qdev, drv, qdev->ndev,
3804                                    "Enabled magic packet successfully on %s.\n",
3805                                    qdev->ndev->name);
3806
3807                 wol |= MB_WOL_MAGIC_PKT;
3808         }
3809
3810         if (qdev->wol) {
3811                 wol |= MB_WOL_MODE_ON;
3812                 status = ql_mb_wol_mode(qdev, wol);
3813                 netif_err(qdev, drv, qdev->ndev,
3814                           "WOL %s (wol code 0x%x) on %s\n",
3815                           (status == 0) ? "Successfully set" : "Failed",
3816                           wol, qdev->ndev->name);
3817         }
3818
3819         return status;
3820 }
3821
3822 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3823 {
3824
3825         /* Don't kill the reset worker thread if we
3826          * are in the process of recovery.
3827          */
3828         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3829                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3830         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3831         cancel_delayed_work_sync(&qdev->mpi_work);
3832         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3833         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3834         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3835 }
3836
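/* Quiesce the adapter: take the link down, cancel the worker threads,
 * disable NAPI and interrupts, reclaim pending tx skbs, soft-reset the
 * chip and release the rx buffers.
 */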
3837 static int ql_adapter_down(struct ql_adapter *qdev)
3838 {
3839         int i, status = 0;
3840
3841         ql_link_off(qdev);
3842
3843         ql_cancel_all_work_sync(qdev);
3844
3845         for (i = 0; i < qdev->rss_ring_count; i++)
3846                 napi_disable(&qdev->rx_ring[i].napi);
3847
3848         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3849
3850         ql_disable_interrupts(qdev);
3851
3852         ql_tx_ring_clean(qdev);
3853
3854         /* Call netif_napi_del() from a common point.
3855          */
3856         for (i = 0; i < qdev->rss_ring_count; i++)
3857                 netif_napi_del(&qdev->rx_ring[i].napi);
3858
3859         status = ql_adapter_reset(qdev);
3860         if (status)
3861                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3862                           qdev->func);
3863         ql_free_rx_buffers(qdev);
3864
3865         return status;
3866 }
3867
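/* Bring the adapter up: initialize the hardware, repost rx buffers,
 * restore the carrier state, multicast/promiscuous mode and VLAN
 * settings, then enable interrupts and the tx queues.
 */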
3868 static int ql_adapter_up(struct ql_adapter *qdev)
3869 {
3870         int err = 0;
3871
3872         err = ql_adapter_initialize(qdev);
3873         if (err) {
3874                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3875                 goto err_init;
3876         }
3877         set_bit(QL_ADAPTER_UP, &qdev->flags);
3878         ql_alloc_rx_buffers(qdev);
3879         /* If the port is initialized and the
3880          * link is up, then turn on the carrier.
3881          */
3882         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3883                         (ql_read32(qdev, STS) & qdev->port_link_up))
3884                 ql_link_on(qdev);
3885         /* Restore rx mode. */
3886         clear_bit(QL_ALLMULTI, &qdev->flags);
3887         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3888         qlge_set_multicast_list(qdev->ndev);
3889
3890         /* Restore vlan setting. */
3891         qlge_restore_vlan(qdev);
3892
3893         ql_enable_interrupts(qdev);
3894         ql_enable_all_completion_interrupts(qdev);
3895         netif_tx_start_all_queues(qdev->ndev);
3896
3897         return 0;
3898 err_init:
3899         ql_adapter_reset(qdev);
3900         return err;
3901 }
3902
3903 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3904 {
3905         ql_free_mem_resources(qdev);
3906         ql_free_irq(qdev);
3907 }
3908
3909 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3910 {
3911         int status = 0;
3912
3913         if (ql_alloc_mem_resources(qdev)) {
3914                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3915                 return -ENOMEM;
3916         }
3917         status = ql_request_irq(qdev);
3918         return status;
3919 }
3920
3921 static int qlge_close(struct net_device *ndev)
3922 {
3923         struct ql_adapter *qdev = netdev_priv(ndev);
3924         int i;
3925
3926         /* If we hit the pci_channel_io_perm_failure
3927          * condition, then we have already
3928          * brought the adapter down.
3929          */
3930         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3931                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3932                 clear_bit(QL_EEH_FATAL, &qdev->flags);
3933                 return 0;
3934         }
3935
3936         /*
3937          * Wait for device to recover from a reset.
3938          * (Rarely happens, but possible.)
3939          */
3940         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3941                 msleep(1);
3942
3943         /* Make sure refill_work doesn't re-enable napi */
3944         for (i = 0; i < qdev->rss_ring_count; i++)
3945                 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3946
3947         ql_adapter_down(qdev);
3948         ql_release_adapter_resources(qdev);
3949         return 0;
3950 }
3951
3952 static void qlge_set_lb_size(struct ql_adapter *qdev)
3953 {
3954         if (qdev->ndev->mtu <= 1500)
3955                 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3956         else
3957                 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3958         qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3959 }
3960
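/* Lay out the tx and rx ring arrays.  One RSS (inbound) completion ring is
 * created per interrupt vector and one tx ring per CPU (up to MAX_CPUS);
 * each tx ring is paired with its own outbound completion ring, so
 * rx_ring_count = rss_ring_count + tx_ring_count.
 */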
3961 static int ql_configure_rings(struct ql_adapter *qdev)
3962 {
3963         int i;
3964         struct rx_ring *rx_ring;
3965         struct tx_ring *tx_ring;
3966         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3967
3968         /* In a perfect world we have one RSS ring for each CPU
3969          * and each has its own vector.  To do that we ask for
3970          * cpu_cnt vectors.  ql_enable_msix() will adjust the
3971          * vector count to what we actually get.  We then
3972          * allocate an RSS ring for each.
3973          * Essentially, we are doing min(cpu_count, msix_vector_count).
3974          */
3975         qdev->intr_count = cpu_cnt;
3976         ql_enable_msix(qdev);
3977         /* Adjust the RSS ring count to the actual vector count. */
3978         qdev->rss_ring_count = qdev->intr_count;
3979         qdev->tx_ring_count = cpu_cnt;
3980         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3981
3982         for (i = 0; i < qdev->tx_ring_count; i++) {
3983                 tx_ring = &qdev->tx_ring[i];
3984                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3985                 tx_ring->qdev = qdev;
3986                 tx_ring->wq_id = i;
3987                 tx_ring->wq_len = qdev->tx_ring_size;
3988                 tx_ring->wq_size =
3989                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3990
3991                 /*
3992                  * The completion queue IDs for the tx rings start
3993                  * immediately after the rss rings.
3994                  */
3995                 tx_ring->cq_id = qdev->rss_ring_count + i;
3996         }
3997
3998         for (i = 0; i < qdev->rx_ring_count; i++) {
3999                 rx_ring = &qdev->rx_ring[i];
4000                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4001                 rx_ring->qdev = qdev;
4002                 rx_ring->cq_id = i;
4003                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4004                 if (i < qdev->rss_ring_count) {
4005                         /*
4006                          * Inbound (RSS) queues.
4007                          */
4008                         rx_ring->cq_len = qdev->rx_ring_size;
4009                         rx_ring->cq_size =
4010                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4011                         rx_ring->lbq.type = QLGE_LB;
4012                         rx_ring->sbq.type = QLGE_SB;
4013                         INIT_DELAYED_WORK(&rx_ring->refill_work,
4014                                           &qlge_slow_refill);
4015                 } else {
4016                         /* Outbound queue handles outbound completions only.
4017                          * The outbound cq is the same size as the tx_ring
4018                          * it services.
4019                          */
4020                         rx_ring->cq_len = qdev->tx_ring_size;
4021                         rx_ring->cq_size =
4022                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4023                 }
4024         }
4025         return 0;
4026 }
4027
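/* ndo_open: reset the chip, size the rings for the current MTU, allocate
 * memory and interrupts, then bring the adapter up.
 */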
4028 static int qlge_open(struct net_device *ndev)
4029 {
4030         int err = 0;
4031         struct ql_adapter *qdev = netdev_priv(ndev);
4032
4033         err = ql_adapter_reset(qdev);
4034         if (err)
4035                 return err;
4036
4037         qlge_set_lb_size(qdev);
4038         err = ql_configure_rings(qdev);
4039         if (err)
4040                 return err;
4041
4042         err = ql_get_adapter_resources(qdev);
4043         if (err)
4044                 goto error_up;
4045
4046         err = ql_adapter_up(qdev);
4047         if (err)
4048                 goto error_up;
4049
4050         return err;
4051
4052 error_up:
4053         ql_release_adapter_resources(qdev);
4054         return err;
4055 }
4056
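/* Re-size the receive buffers after an MTU change by cycling the adapter
 * down and back up.  If the up/down cycle fails the device is closed.
 */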
4057 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4058 {
4059         int status;
4060
4061         /* Wait for an outstanding reset to complete. */
4062         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4063                 int i = 4;
4064
4065                 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4066                         netif_err(qdev, ifup, qdev->ndev,
4067                                   "Waiting for adapter UP...\n");
4068                         ssleep(1);
4069                 }
4070
4071                 if (!i) {
4072                         netif_err(qdev, ifup, qdev->ndev,
4073                                   "Timed out waiting for adapter UP\n");
4074                         return -ETIMEDOUT;
4075                 }
4076         }
4077
4078         status = ql_adapter_down(qdev);
4079         if (status)
4080                 goto error;
4081
4082         qlge_set_lb_size(qdev);
4083
4084         status = ql_adapter_up(qdev);
4085         if (status)
4086                 goto error;
4087
4088         return status;
4089 error:
4090         netif_alert(qdev, ifup, qdev->ndev,
4091                     "Driver up/down cycle failed, closing device.\n");
4092         set_bit(QL_ADAPTER_UP, &qdev->flags);
4093         dev_close(qdev->ndev);
4094         return status;
4095 }
4096
4097 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4098 {
4099         struct ql_adapter *qdev = netdev_priv(ndev);
4100         int status;
4101
4102         if (ndev->mtu == 1500 && new_mtu == 9000)
4103                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4104         else if (ndev->mtu == 9000 && new_mtu == 1500)
4105                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4106         else
4107                 return -EINVAL;
4108
4109         queue_delayed_work(qdev->workqueue,
4110                         &qdev->mpi_port_cfg_work, 3*HZ);
4111
4112         ndev->mtu = new_mtu;
4113
4114         if (!netif_running(qdev->ndev))
4115                 return 0;
4117
4118         status = ql_change_rx_buffers(qdev);
4119         if (status) {
4120                 netif_err(qdev, ifup, qdev->ndev,
4121                           "Changing MTU failed.\n");
4122         }
4123
4124         return status;
4125 }
4126
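/* ndo_get_stats: fold the per-ring counters into the netdev stats. */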
4127 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4129 {
4130         struct ql_adapter *qdev = netdev_priv(ndev);
4131         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4132         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4133         unsigned long pkts, mcast, dropped, errors, bytes;
4134         int i;
4135
4136         /* Get RX stats. */
4137         pkts = mcast = dropped = errors = bytes = 0;
4138         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4139                 pkts += rx_ring->rx_packets;
4140                 bytes += rx_ring->rx_bytes;
4141                 dropped += rx_ring->rx_dropped;
4142                 errors += rx_ring->rx_errors;
4143                 mcast += rx_ring->rx_multicast;
4144         }
4145         ndev->stats.rx_packets = pkts;
4146         ndev->stats.rx_bytes = bytes;
4147         ndev->stats.rx_dropped = dropped;
4148         ndev->stats.rx_errors = errors;
4149         ndev->stats.multicast = mcast;
4150
4151         /* Get TX stats. */
4152         pkts = errors = bytes = 0;
4153         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4154                 pkts += tx_ring->tx_packets;
4155                 bytes += tx_ring->tx_bytes;
4156                 errors += tx_ring->tx_errors;
4157         }
4158         ndev->stats.tx_packets = pkts;
4159         ndev->stats.tx_bytes = bytes;
4160         ndev->stats.tx_errors = errors;
4161         return &ndev->stats;
4162 }
4163
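/* ndo_set_rx_mode: update the routing registers on promiscuous and
 * all-multi transitions and reload the multicast address filter.
 */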
4164 static void qlge_set_multicast_list(struct net_device *ndev)
4165 {
4166         struct ql_adapter *qdev = netdev_priv(ndev);
4167         struct netdev_hw_addr *ha;
4168         int i, status;
4169
4170         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4171         if (status)
4172                 return;
4173         /*
4174          * Set or clear promiscuous mode if a
4175          * transition is taking place.
4176          */
4177         if (ndev->flags & IFF_PROMISC) {
4178                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4179                         if (ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT,
4180                                                RT_IDX_VALID, 1)) {
4181                                 netif_err(qdev, hw, qdev->ndev,
4182                                           "Failed to set promiscuous mode.\n");
4183                         } else {
4184                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4185                         }
4186                 }
4187         } else {
4188                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4189                         if (ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT,
4190                                                RT_IDX_VALID, 0)) {
4191                                 netif_err(qdev, hw, qdev->ndev,
4192                                           "Failed to clear promiscuous mode.\n");
4193                         } else {
4194                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4195                         }
4196                 }
4197         }
4198
4199         /*
4200          * Set or clear all multicast mode if a
4201          * transition is taking place.
4202          */
4203         if ((ndev->flags & IFF_ALLMULTI) ||
4204             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4205                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4206                         if (ql_set_routing_reg(qdev, RT_IDX_ALLMULTI_SLOT,
4207                                                RT_IDX_MCAST, 1)) {
4208                                 netif_err(qdev, hw, qdev->ndev,
4209                                           "Failed to set all-multi mode.\n");
4210                         } else {
4211                                 set_bit(QL_ALLMULTI, &qdev->flags);
4212                         }
4213                 }
4214         } else {
4215                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4216                         if (ql_set_routing_reg(qdev, RT_IDX_ALLMULTI_SLOT,
4217                                                RT_IDX_MCAST, 0)) {
4218                                 netif_err(qdev, hw, qdev->ndev,
4219                                           "Failed to clear all-multi mode.\n");
4220                         } else {
4221                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4222                         }
4223                 }
4224         }
4225
4226         if (!netdev_mc_empty(ndev)) {
4227                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4228                 if (status)
4229                         goto exit;
4230                 i = 0;
4231                 netdev_for_each_mc_addr(ha, ndev) {
4232                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4233                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4234                                 netif_err(qdev, hw, qdev->ndev,
4235                                           "Failed to load multicast address.\n");
4236                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4237                                 goto exit;
4238                         }
4239                         i++;
4240                 }
4241                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4242                 if (ql_set_routing_reg(qdev, RT_IDX_MCAST_MATCH_SLOT,
4243                                        RT_IDX_MCAST_MATCH, 1)) {
4244                         netif_err(qdev, hw, qdev->ndev,
4245                                   "Failed to set multicast match mode.\n");
4246                 } else {
4247                         set_bit(QL_ALLMULTI, &qdev->flags);
4248                 }
4249         }
4250 exit:
4251         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4252 }
4253
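/* ndo_set_mac_address: keep a local copy of the new address and program it
 * into the CAM.
 */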
4254 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4255 {
4256         struct ql_adapter *qdev = netdev_priv(ndev);
4257         struct sockaddr *addr = p;
4258         int status;
4259
4260         if (!is_valid_ether_addr(addr->sa_data))
4261                 return -EADDRNOTAVAIL;
4262         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4263         /* Update local copy of current mac address. */
4264         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4265
4266         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4267         if (status)
4268                 return status;
4269         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4270                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4271         if (status)
4272                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4273         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4274         return status;
4275 }
4276
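/* ndo_tx_timeout: hand the stall off to the ASIC error recovery path. */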
4277 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4278 {
4279         struct ql_adapter *qdev = netdev_priv(ndev);

4280         ql_queue_asic_error(qdev);
4281 }
4282
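/* Deferred ASIC reset: cycle the adapter down and back up under rtnl_lock
 * and restore the rx mode.  If the cycle fails, close the device.
 */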
4283 static void ql_asic_reset_work(struct work_struct *work)
4284 {
4285         struct ql_adapter *qdev =
4286             container_of(work, struct ql_adapter, asic_reset_work.work);
4287         int status;

4288         rtnl_lock();
4289         status = ql_adapter_down(qdev);
4290         if (status)
4291                 goto error;
4292
4293         status = ql_adapter_up(qdev);
4294         if (status)
4295                 goto error;
4296
4297         /* Restore rx mode. */
4298         clear_bit(QL_ALLMULTI, &qdev->flags);
4299         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4300         qlge_set_multicast_list(qdev->ndev);
4301
4302         rtnl_unlock();
4303         return;
4304 error:
4305         netif_alert(qdev, ifup, qdev->ndev,
4306                     "Driver up/down cycle failed, closing device\n");
4307
4308         set_bit(QL_ADAPTER_UP, &qdev->flags);
4309         dev_close(qdev->ndev);
4310         rtnl_unlock();
4311 }
4312
4313 static const struct nic_operations qla8012_nic_ops = {
4314         .get_flash              = ql_get_8012_flash_params,
4315         .port_initialize        = ql_8012_port_initialize,
4316 };
4317
4318 static const struct nic_operations qla8000_nic_ops = {
4319         .get_flash              = ql_get_8000_flash_params,
4320         .port_initialize        = ql_8000_port_initialize,
4321 };
4322
4323 /* Find the pcie function number for the other NIC
4324  * on this chip.  Since both NIC functions share a
4325  * common firmware we have the lowest enabled function
4326  * do any common work.  Examples would be resetting
4327  * after a fatal firmware error, or doing a firmware
4328  * coredump.
4329  */
4330 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4331 {
4332         int status = 0;
4333         u32 temp;
4334         u32 nic_func1, nic_func2;
4335
4336         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4337                         &temp);
4338         if (status)
4339                 return status;
4340
4341         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4342                         MPI_TEST_NIC_FUNC_MASK);
4343         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4344                         MPI_TEST_NIC_FUNC_MASK);
4345
4346         if (qdev->func == nic_func1)
4347                 qdev->alt_func = nic_func2;
4348         else if (qdev->func == nic_func2)
4349                 qdev->alt_func = nic_func1;
4350         else
4351                 status = -EIO;
4352
4353         return status;
4354 }
4355
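/* Work out which PCI function and port this instance is and select the
 * per-port semaphore mask, status bits, mailbox addresses and nic_ops.
 */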
4356 static int ql_get_board_info(struct ql_adapter *qdev)
4357 {
4358         int status;

4359         qdev->func =
4360             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4361         if (qdev->func > 3)
4362                 return -EIO;
4363
4364         status = ql_get_alt_pcie_func(qdev);
4365         if (status)
4366                 return status;
4367
4368         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4369         if (qdev->port) {
4370                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4371                 qdev->port_link_up = STS_PL1;
4372                 qdev->port_init = STS_PI1;
4373                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4374                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4375         } else {
4376                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4377                 qdev->port_link_up = STS_PL0;
4378                 qdev->port_init = STS_PI0;
4379                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4380                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4381         }
4382         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4383         qdev->device_id = qdev->pdev->device;
4384         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4385                 qdev->nic_ops = &qla8012_nic_ops;
4386         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4387                 qdev->nic_ops = &qla8000_nic_ops;
4388         return status;
4389 }
4390
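/* Release everything acquired in ql_init_device(): the workqueue, register
 * mappings, MPI coredump buffer and PCI regions.
 */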
4391 static void ql_release_all(struct pci_dev *pdev)
4392 {
4393         struct net_device *ndev = pci_get_drvdata(pdev);
4394         struct ql_adapter *qdev = netdev_priv(ndev);
4395
4396         if (qdev->workqueue) {
4397                 destroy_workqueue(qdev->workqueue);
4398                 qdev->workqueue = NULL;
4399         }
4400
4401         if (qdev->reg_base)
4402                 iounmap(qdev->reg_base);
4403         if (qdev->doorbell_area)
4404                 iounmap(qdev->doorbell_area);
4405         vfree(qdev->mpi_coredump);
4406         pci_release_regions(pdev);
4407 }
4408
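/* One-time per-device setup: enable the PCI device, map the register and
 * doorbell BARs, read the board info and flash, and create the workqueue
 * and work items used by the rest of the driver.
 */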
4409 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4410                           int cards_found)
4411 {
4412         struct ql_adapter *qdev = netdev_priv(ndev);
4413         int err = 0;
4414
4415         memset((void *)qdev, 0, sizeof(*qdev));
4416         err = pci_enable_device(pdev);
4417         if (err) {
4418                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4419                 return err;
4420         }
4421
4422         qdev->ndev = ndev;
4423         qdev->pdev = pdev;
4424         pci_set_drvdata(pdev, ndev);
4425
4426         /* Set PCIe read request size */
4427         err = pcie_set_readrq(pdev, 4096);
4428         if (err) {
4429                 dev_err(&pdev->dev, "Set readrq failed.\n");
4430                 goto err_out1;
4431         }
4432
4433         err = pci_request_regions(pdev, DRV_NAME);
4434         if (err) {
4435                 dev_err(&pdev->dev, "PCI region request failed.\n");
4436                 goto err_out1;
4437         }
4438
4439         pci_set_master(pdev);
4440         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4441                 set_bit(QL_DMA64, &qdev->flags);
4442                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4443         } else {
4444                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4445                 if (!err)
4446                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4447         }
4448
4449         if (err) {
4450                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4451                 goto err_out2;
4452         }
4453
4454         /* Set PCIe reset type for EEH to fundamental. */
4455         pdev->needs_freset = 1;
4456         pci_save_state(pdev);
4457         qdev->reg_base =
4458             ioremap(pci_resource_start(pdev, 1),
4459                             pci_resource_len(pdev, 1));
4460         if (!qdev->reg_base) {
4461                 dev_err(&pdev->dev, "Register mapping failed.\n");
4462                 err = -ENOMEM;
4463                 goto err_out2;
4464         }
4465
4466         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4467         qdev->doorbell_area =
4468             ioremap(pci_resource_start(pdev, 3),
4469                             pci_resource_len(pdev, 3));
4470         if (!qdev->doorbell_area) {
4471                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4472                 err = -ENOMEM;
4473                 goto err_out2;
4474         }
4475
4476         err = ql_get_board_info(qdev);
4477         if (err) {
4478                 dev_err(&pdev->dev, "Register access failed.\n");
4479                 err = -EIO;
4480                 goto err_out2;
4481         }
4482         qdev->msg_enable = netif_msg_init(debug, default_msg);
4483         spin_lock_init(&qdev->stats_lock);
4484
4485         if (qlge_mpi_coredump) {
4486                 qdev->mpi_coredump =
4487                         vmalloc(sizeof(struct ql_mpi_coredump));
4488                 if (!qdev->mpi_coredump) {
4489                         err = -ENOMEM;
4490                         goto err_out2;
4491                 }
4492                 if (qlge_force_coredump)
4493                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4494         }
4495         /* make sure the EEPROM is good */
4496         err = qdev->nic_ops->get_flash(qdev);
4497         if (err) {
4498                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4499                 goto err_out2;
4500         }
4501
4502         /* Keep local copy of current mac address. */
4503         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4504
4505         /* Set up the default ring sizes. */
4506         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4507         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4508
4509         /* Set up the coalescing parameters. */
4510         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4511         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4512         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4513         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4514
4515         /*
4516          * Set up the operating parameters.
4517          */
4518         qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4519                                                   ndev->name);
4520         if (!qdev->workqueue) {
4521                 err = -ENOMEM;
4522                 goto err_out2;
4523         }
4524
4525         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4526         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4527         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4528         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4529         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4530         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4531         init_completion(&qdev->ide_completion);
4532         mutex_init(&qdev->mpi_mutex);
4533
4534         if (!cards_found) {
4535                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4536                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4537                          DRV_NAME, DRV_VERSION);
4538         }
4539         return 0;
4540 err_out2:
4541         ql_release_all(pdev);
4542 err_out1:
4543         pci_disable_device(pdev);
4544         return err;
4545 }
4546
4547 static const struct net_device_ops qlge_netdev_ops = {
4548         .ndo_open               = qlge_open,
4549         .ndo_stop               = qlge_close,
4550         .ndo_start_xmit         = qlge_send,
4551         .ndo_change_mtu         = qlge_change_mtu,
4552         .ndo_get_stats          = qlge_get_stats,
4553         .ndo_set_rx_mode        = qlge_set_multicast_list,
4554         .ndo_set_mac_address    = qlge_set_mac_address,
4555         .ndo_validate_addr      = eth_validate_addr,
4556         .ndo_tx_timeout         = qlge_tx_timeout,
4557         .ndo_set_features       = qlge_set_features,
4558         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4559         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4560 };
4561
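/* Deferrable watchdog: a periodic register read that lets EEH notice a
 * dead PCI channel.  Re-armed every 5 seconds while the channel is online.
 */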
4562 static void ql_timer(struct timer_list *t)
4563 {
4564         struct ql_adapter *qdev = from_timer(qdev, t, timer);
4565         u32 var = 0;
4566
4567         var = ql_read32(qdev, STS);
4568         if (pci_channel_offline(qdev->pdev)) {
4569                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4570                 return;
4571         }
4572
4573         mod_timer(&qdev->timer, jiffies + (5*HZ));
4574 }
4575
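/* PCI probe: allocate a multiqueue netdev, run the per-device setup,
 * advertise offload features, register the netdev and start the EEH
 * watchdog timer.
 */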
4576 static int qlge_probe(struct pci_dev *pdev,
4577                       const struct pci_device_id *pci_entry)
4578 {
4579         struct net_device *ndev = NULL;
4580         struct ql_adapter *qdev = NULL;
4581         static int cards_found;
4582         int err = 0;
4583
4584         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4585                         min(MAX_CPUS, netif_get_num_default_rss_queues()));
4586         if (!ndev)
4587                 return -ENOMEM;
4588
4589         err = ql_init_device(pdev, ndev, cards_found);
4590         if (err < 0) {
4591                 free_netdev(ndev);
4592                 return err;
4593         }
4594
4595         qdev = netdev_priv(ndev);
4596         SET_NETDEV_DEV(ndev, &pdev->dev);
4597         ndev->hw_features = NETIF_F_SG |
4598                             NETIF_F_IP_CSUM |
4599                             NETIF_F_TSO |
4600                             NETIF_F_TSO_ECN |
4601                             NETIF_F_HW_VLAN_CTAG_TX |
4602                             NETIF_F_HW_VLAN_CTAG_RX |
4603                             NETIF_F_HW_VLAN_CTAG_FILTER |
4604                             NETIF_F_RXCSUM;
4605         ndev->features = ndev->hw_features;
4606         ndev->vlan_features = ndev->hw_features;
4607         /* vlan gets same features (except vlan filter) */
4608         ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4609                                  NETIF_F_HW_VLAN_CTAG_TX |
4610                                  NETIF_F_HW_VLAN_CTAG_RX);
4611
4612         if (test_bit(QL_DMA64, &qdev->flags))
4613                 ndev->features |= NETIF_F_HIGHDMA;
4614
4615         /*
4616          * Set up net_device structure.
4617          */
4618         ndev->tx_queue_len = qdev->tx_ring_size;
4619         ndev->irq = pdev->irq;
4620
4621         ndev->netdev_ops = &qlge_netdev_ops;
4622         ndev->ethtool_ops = &qlge_ethtool_ops;
4623         ndev->watchdog_timeo = 10 * HZ;
4624
4625         /* MTU range: this driver only supports 1500 or 9000, so this only
4626          * filters out values above or below, and we'll rely on
4627          * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4628          */
4629         ndev->min_mtu = ETH_DATA_LEN;
4630         ndev->max_mtu = 9000;
4631
4632         err = register_netdev(ndev);
4633         if (err) {
4634                 dev_err(&pdev->dev, "net device registration failed.\n");
4635                 ql_release_all(pdev);
4636                 pci_disable_device(pdev);
4637                 free_netdev(ndev);
4638                 return err;
4639         }
4640         /* Start up the timer to trigger EEH if
4641          * the bus goes dead
4642          */
4643         timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4644         mod_timer(&qdev->timer, jiffies + (5*HZ));
4645         ql_link_off(qdev);
4646         ql_display_dev_info(ndev);
4647         atomic_set(&qdev->lb_count, 0);
4648         cards_found++;
4649         return 0;
4650 }
4651
4652 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4653 {
4654         return qlge_send(skb, ndev);
4655 }
4656
4657 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4658 {
4659         return ql_clean_inbound_rx_ring(rx_ring, budget);
4660 }
4661
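/* PCI remove: stop the timer and all work, unregister the netdev and
 * release the device resources.
 */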
4662 static void qlge_remove(struct pci_dev *pdev)
4663 {
4664         struct net_device *ndev = pci_get_drvdata(pdev);
4665         struct ql_adapter *qdev = netdev_priv(ndev);

4666         del_timer_sync(&qdev->timer);
4667         ql_cancel_all_work_sync(qdev);
4668         unregister_netdev(ndev);
4669         ql_release_all(pdev);
4670         pci_disable_device(pdev);
4671         free_netdev(ndev);
4672 }
4673
4674 /* Clean up resources without touching hardware. */
4675 static void ql_eeh_close(struct net_device *ndev)
4676 {
4677         int i;
4678         struct ql_adapter *qdev = netdev_priv(ndev);
4679
4680         if (netif_carrier_ok(ndev)) {
4681                 netif_carrier_off(ndev);
4682                 netif_stop_queue(ndev);
4683         }
4684
4685         /* Cancel any outstanding driver work. */
4686         ql_cancel_all_work_sync(qdev);
4687
4688         for (i = 0; i < qdev->rss_ring_count; i++)
4689                 netif_napi_del(&qdev->rx_ring[i].napi);
4690
4691         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4692         ql_tx_ring_clean(qdev);
4693         ql_free_rx_buffers(qdev);
4694         ql_release_adapter_resources(qdev);
4695 }
4696
4697 /*
4698  * This callback is called by the PCI subsystem whenever
4699  * a PCI bus error is detected.
4700  */
4701 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4702                                                enum pci_channel_state state)
4703 {
4704         struct net_device *ndev = pci_get_drvdata(pdev);
4705         struct ql_adapter *qdev = netdev_priv(ndev);
4706
4707         switch (state) {
4708         case pci_channel_io_normal:
4709                 return PCI_ERS_RESULT_CAN_RECOVER;
4710         case pci_channel_io_frozen:
4711                 netif_device_detach(ndev);
4712                 del_timer_sync(&qdev->timer);
4713                 if (netif_running(ndev))
4714                         ql_eeh_close(ndev);
4715                 pci_disable_device(pdev);
4716                 return PCI_ERS_RESULT_NEED_RESET;
4717         case pci_channel_io_perm_failure:
4718                 dev_err(&pdev->dev,
4719                         "%s: pci_channel_io_perm_failure.\n", __func__);
4720                 del_timer_sync(&qdev->timer);
4721                 ql_eeh_close(ndev);
4722                 set_bit(QL_EEH_FATAL, &qdev->flags);
4723                 return PCI_ERS_RESULT_DISCONNECT;
4724         }
4725
4726         /* Request a slot reset. */
4727         return PCI_ERS_RESULT_NEED_RESET;
4728 }
4729
4730 /*
4731  * This callback is called after the PCI bus has been reset.
4732  * Basically, this tries to restart the card from scratch.
4733  * This is a shortened version of the device probe/discovery code;
4734  * it resembles the first half of the () routine.
4735  */
4736 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4737 {
4738         struct net_device *ndev = pci_get_drvdata(pdev);
4739         struct ql_adapter *qdev = netdev_priv(ndev);
4740
4741         pdev->error_state = pci_channel_io_normal;
4742
4743         pci_restore_state(pdev);
4744         if (pci_enable_device(pdev)) {
4745                 netif_err(qdev, ifup, qdev->ndev,
4746                           "Cannot re-enable PCI device after reset.\n");
4747                 return PCI_ERS_RESULT_DISCONNECT;
4748         }
4749         pci_set_master(pdev);
4750
4751         if (ql_adapter_reset(qdev)) {
4752                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4753                 set_bit(QL_EEH_FATAL, &qdev->flags);
4754                 return PCI_ERS_RESULT_DISCONNECT;
4755         }
4756
4757         return PCI_ERS_RESULT_RECOVERED;
4758 }
4759
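/* Final stage of EEH recovery: re-open the interface if it was running
 * before the error and re-arm the watchdog timer.
 */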
4760 static void qlge_io_resume(struct pci_dev *pdev)
4761 {
4762         struct net_device *ndev = pci_get_drvdata(pdev);
4763         struct ql_adapter *qdev = netdev_priv(ndev);
4764         int err = 0;
4765
4766         if (netif_running(ndev)) {
4767                 err = qlge_open(ndev);
4768                 if (err) {
4769                         netif_err(qdev, ifup, qdev->ndev,
4770                                   "Device initialization failed after reset.\n");
4771                         return;
4772                 }
4773         } else {
4774                 netif_err(qdev, ifup, qdev->ndev,
4775                           "Device was not running prior to EEH.\n");
4776         }
4777         mod_timer(&qdev->timer, jiffies + (5*HZ));
4778         netif_device_attach(ndev);
4779 }
4780
4781 static const struct pci_error_handlers qlge_err_handler = {
4782         .error_detected = qlge_io_error_detected,
4783         .slot_reset = qlge_io_slot_reset,
4784         .resume = qlge_io_resume,
4785 };
4786
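/* Legacy PM suspend: bring the adapter down, configure wake-on-LAN and put
 * the device into the requested low-power state.  Also used by
 * qlge_shutdown().
 */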
4787 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4788 {
4789         struct net_device *ndev = pci_get_drvdata(pdev);
4790         struct ql_adapter *qdev = netdev_priv(ndev);
4791         int err;
4792
4793         netif_device_detach(ndev);
4794         del_timer_sync(&qdev->timer);
4795
4796         if (netif_running(ndev)) {
4797                 err = ql_adapter_down(qdev);
4798                 if (err)
4799                         return err;
4800         }
4801
4802         ql_wol(qdev);
4803         err = pci_save_state(pdev);
4804         if (err)
4805                 return err;
4806
4807         pci_disable_device(pdev);
4808
4809         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4810
4811         return 0;
4812 }
4813
4814 #ifdef CONFIG_PM
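/* Legacy PM resume: restore PCI state, disable wake sources and bring the
 * adapter back up if the interface was running.
 */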
4815 static int qlge_resume(struct pci_dev *pdev)
4816 {
4817         struct net_device *ndev = pci_get_drvdata(pdev);
4818         struct ql_adapter *qdev = netdev_priv(ndev);
4819         int err;
4820
4821         pci_set_power_state(pdev, PCI_D0);
4822         pci_restore_state(pdev);
4823         err = pci_enable_device(pdev);
4824         if (err) {
4825                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4826                 return err;
4827         }
4828         pci_set_master(pdev);
4829
4830         pci_enable_wake(pdev, PCI_D3hot, 0);
4831         pci_enable_wake(pdev, PCI_D3cold, 0);
4832
4833         if (netif_running(ndev)) {
4834                 err = ql_adapter_up(qdev);
4835                 if (err)
4836                         return err;
4837         }
4838
4839         mod_timer(&qdev->timer, jiffies + (5*HZ));
4840         netif_device_attach(ndev);
4841
4842         return 0;
4843 }
4844 #endif /* CONFIG_PM */
4845
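/* Reuse the suspend path to quiesce the device on shutdown/reboot. */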
4846 static void qlge_shutdown(struct pci_dev *pdev)
4847 {
4848         qlge_suspend(pdev, PMSG_SUSPEND);
4849 }
4850
4851 static struct pci_driver qlge_driver = {
4852         .name = DRV_NAME,
4853         .id_table = qlge_pci_tbl,
4854         .probe = qlge_probe,
4855         .remove = qlge_remove,
4856 #ifdef CONFIG_PM
4857         .suspend = qlge_suspend,
4858         .resume = qlge_resume,
4859 #endif
4860         .shutdown = qlge_shutdown,
4861         .err_handler = &qlge_err_handler
4862 };
4863
4864 module_pci_driver(qlge_driver);