1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43
44 #include "qlge.h"
45
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53
54 static const u32 default_msg =
55     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER |    */
57     NETIF_MSG_IFDOWN |
58     NETIF_MSG_IFUP |
59     NETIF_MSG_RX_ERR |
60     NETIF_MSG_TX_ERR |
61 /*  NETIF_MSG_TX_QUEUED | */
62 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66 static int debug = -1;  /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70 #define MSIX_IRQ 0
71 #define MSI_IRQ 1
72 #define LEG_IRQ 2
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80                  "Option to enable MPI firmware dump. Default is OFF - do not allocate memory.");
81
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85                  "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
86
87 static const struct pci_device_id qlge_pci_tbl[] = {
88         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
89         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
90         /* required last entry */
91         {0,}
92 };
93
94 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
95
96 static int ql_wol(struct ql_adapter *);
97 static void qlge_set_multicast_list(struct net_device *);
98 static int ql_adapter_down(struct ql_adapter *);
99 static int ql_adapter_up(struct ql_adapter *);
100
101 /* This hardware semaphore provides exclusive access to
102  * resources shared between the NIC driver, MPI firmware,
103  * FCOE firmware and the FC driver.
104  */
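/* ql_sem_trylock() returns 0 when the requested semaphore bit was acquired,
 * non-zero when the hardware did not grant it (or -EINVAL for a bad mask).
 */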
105 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
106 {
107         u32 sem_bits = 0;
108
109         switch (sem_mask) {
110         case SEM_XGMAC0_MASK:
111                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
112                 break;
113         case SEM_XGMAC1_MASK:
114                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
115                 break;
116         case SEM_ICB_MASK:
117                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
118                 break;
119         case SEM_MAC_ADDR_MASK:
120                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
121                 break;
122         case SEM_FLASH_MASK:
123                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
124                 break;
125         case SEM_PROBE_MASK:
126                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
127                 break;
128         case SEM_RT_IDX_MASK:
129                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
130                 break;
131         case SEM_PROC_REG_MASK:
132                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
133                 break;
134         default:
135                 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
136                 return -EINVAL;
137         }
138
139         ql_write32(qdev, SEM, sem_bits | sem_mask);
140         return !(ql_read32(qdev, SEM) & sem_bits);
141 }
142
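/* Poll the hardware semaphore for up to 30 attempts, 100us apart.
 * Returns 0 on success or -ETIMEDOUT.
 */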
143 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
144 {
145         unsigned int wait_count = 30;
146         do {
147                 if (!ql_sem_trylock(qdev, sem_mask))
148                         return 0;
149                 udelay(100);
150         } while (--wait_count);
151         return -ETIMEDOUT;
152 }
153
154 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
155 {
156         ql_write32(qdev, SEM, sem_mask);
157         ql_read32(qdev, SEM);   /* flush */
158 }
159
160 /* This function waits for a specific bit to come ready
161  * in a given register.  It is used mostly by the initialization
162  * process, but is also used by kernel thread APIs such as
163  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
164  */
165 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
166 {
167         u32 temp;
168         int count;
169
170         for (count = 0; count < UDELAY_COUNT; count++) {
171                 temp = ql_read32(qdev, reg);
172
173                 /* check for errors */
174                 if (temp & err_bit) {
175                         netif_alert(qdev, probe, qdev->ndev,
176                                     "register 0x%.08x access error, value = 0x%.08x!\n",
177                                     reg, temp);
178                         return -EIO;
179                 } else if (temp & bit) {
180                         return 0;
181                 }
182                 udelay(UDELAY_DELAY);
183         }
184         netif_alert(qdev, probe, qdev->ndev,
185                     "Timed out waiting for reg %x to come ready.\n", reg);
186         return -ETIMEDOUT;
187 }
188
189 /* The CFG register is used to download TX and RX control blocks
190  * to the chip. This function waits for an operation to complete.
191  */
192 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
193 {
194         int count;
195         u32 temp;
196
197         for (count = 0; count < UDELAY_COUNT; count++) {
198                 temp = ql_read32(qdev, CFG);
199                 if (temp & CFG_LE)
200                         return -EIO;
201                 if (!(temp & bit))
202                         return 0;
203                 udelay(UDELAY_DELAY);
204         }
205         return -ETIMEDOUT;
206 }
207
208 /* Used to issue init control blocks to hw. Maps control block,
209  * sets address, triggers download, waits for completion.
210  */
211 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
212                  u16 q_id)
213 {
214         u64 map;
215         int status = 0;
216         int direction;
217         u32 mask;
218         u32 value;
219
220         direction =
221             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
222             PCI_DMA_FROMDEVICE;
223
224         map = pci_map_single(qdev->pdev, ptr, size, direction);
225         if (pci_dma_mapping_error(qdev->pdev, map)) {
226                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
227                 return -ENOMEM;
228         }
229
230         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
231         if (status)
232                 return status;
233
234         status = ql_wait_cfg(qdev, bit);
235         if (status) {
236                 netif_err(qdev, ifup, qdev->ndev,
237                           "Timed out waiting for CFG to come ready.\n");
238                 goto exit;
239         }
240
241         ql_write32(qdev, ICB_L, (u32) map);
242         ql_write32(qdev, ICB_H, (u32) (map >> 32));
243
244         mask = CFG_Q_MASK | (bit << 16);
245         value = bit | (q_id << CFG_Q_SHIFT);
246         ql_write32(qdev, CFG, (mask | value));
247
248         /*
249          * Wait for the bit to clear after signaling hw.
250          */
251         status = ql_wait_cfg(qdev, bit);
252 exit:
253         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
254         pci_unmap_single(qdev->pdev, map, size, direction);
255         return status;
256 }
257
258 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
259 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
260                         u32 *value)
261 {
262         u32 offset = 0;
263         int status;
264
265         switch (type) {
266         case MAC_ADDR_TYPE_MULTI_MAC:
267         case MAC_ADDR_TYPE_CAM_MAC:
268                 {
269                         status =
270                             ql_wait_reg_rdy(qdev,
271                                             MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272                         if (status)
273                                 goto exit;
274                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
276                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277                         status =
278                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
279                         if (status)
280                                 goto exit;
281                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
282                         status =
283                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
284                         if (status)
285                                 goto exit;
286                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
288                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
289                         status =
290                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
291                         if (status)
292                                 goto exit;
293                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
294                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
295                                 status =
296                                     ql_wait_reg_rdy(qdev,
297                                                     MAC_ADDR_IDX, MAC_ADDR_MW,
298                                                     0);
299                                 if (status)
300                                         goto exit;
301                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
303                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
304                                 status =
305                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306                                                     MAC_ADDR_MR, 0);
307                                 if (status)
308                                         goto exit;
309                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
310                         }
311                         break;
312                 }
313         case MAC_ADDR_TYPE_VLAN:
314         case MAC_ADDR_TYPE_MULTI_FLTR:
315         default:
316                 netif_crit(qdev, ifup, qdev->ndev,
317                            "Address type %d not yet supported.\n", type);
318                 status = -EPERM;
319         }
320 exit:
321         return status;
322 }
323
324 /* Set up a MAC, multicast or VLAN address for the
325  * inbound frame matching.
326  */
327 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
328                                u16 index)
329 {
330         u32 offset = 0;
331         int status = 0;
332
333         switch (type) {
334         case MAC_ADDR_TYPE_MULTI_MAC:
335                 {
336                         u32 upper = (addr[0] << 8) | addr[1];
337                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338                                         (addr[4] << 8) | (addr[5]);
339
340                         status =
341                                 ql_wait_reg_rdy(qdev,
342                                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
343                         if (status)
344                                 goto exit;
345                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346                                 (index << MAC_ADDR_IDX_SHIFT) |
347                                 type | MAC_ADDR_E);
348                         ql_write32(qdev, MAC_ADDR_DATA, lower);
349                         status =
350                                 ql_wait_reg_rdy(qdev,
351                                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352                         if (status)
353                                 goto exit;
354                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355                                 (index << MAC_ADDR_IDX_SHIFT) |
356                                 type | MAC_ADDR_E);
357
358                         ql_write32(qdev, MAC_ADDR_DATA, upper);
359                         status =
360                                 ql_wait_reg_rdy(qdev,
361                                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
362                         if (status)
363                                 goto exit;
364                         break;
365                 }
366         case MAC_ADDR_TYPE_CAM_MAC:
367                 {
368                         u32 cam_output;
369                         u32 upper = (addr[0] << 8) | addr[1];
370                         u32 lower =
371                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
372                             (addr[5]);
373                         status =
374                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
375                         if (status)
376                                 goto exit;
377                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
378                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
379                                    type);       /* type */
380                         ql_write32(qdev, MAC_ADDR_DATA, lower);
381                         status =
382                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
383                         if (status)
384                                 goto exit;
385                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
386                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
387                                    type);       /* type */
388                         ql_write32(qdev, MAC_ADDR_DATA, upper);
389                         status =
390                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
391                         if (status)
392                                 goto exit;
393                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
394                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
395                                    type);       /* type */
396                         /* This field should also include the queue id
397                          * and possibly the function id.  Right now we hardcode
398                          * the route field to NIC core.
399                          */
400                         cam_output = (CAM_OUT_ROUTE_NIC |
401                                       (qdev->func << CAM_OUT_FUNC_SHIFT) |
402                                       (0 << CAM_OUT_CQ_ID_SHIFT));
404                         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
405                                 cam_output |= CAM_OUT_RV;
406                         /* route to NIC core */
407                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
408                         break;
409                 }
410         case MAC_ADDR_TYPE_VLAN:
411                 {
412                         u32 enable_bit = *((u32 *) &addr[0]);
413                         /* For VLAN, the addr actually holds a bit that
414                          * either enables or disables the vlan id we are
415                          * addressing. It's either MAC_ADDR_E on or off.
416                          * That's bit-27 we're talking about.
417                          */
418                         status =
419                             ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
420                         if (status)
421                                 goto exit;
422                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
423                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
424                                    type |       /* type */
425                                    enable_bit); /* enable/disable */
426                         break;
427                 }
428         case MAC_ADDR_TYPE_MULTI_FLTR:
429         default:
430                 netif_crit(qdev, ifup, qdev->ndev,
431                            "Address type %d not yet supported.\n", type);
432                 status = -EPERM;
433         }
434 exit:
435         return status;
436 }
437
438 /* Set or clear MAC address in hardware. We sometimes
439  * have to clear it to prevent wrong frame routing
440  * especially in a bonding environment.
441  */
442 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
443 {
444         int status;
445         char zero_mac_addr[ETH_ALEN];
446         char *addr;
447
448         if (set) {
449                 addr = &qdev->current_mac_addr[0];
450                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
451                              "Set Mac addr %pM\n", addr);
452         } else {
453                 eth_zero_addr(zero_mac_addr);
454                 addr = &zero_mac_addr[0];
455                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
456                              "Clearing MAC address\n");
457         }
458         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
459         if (status)
460                 return status;
461         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
462                                      MAC_ADDR_TYPE_CAM_MAC,
463                                      qdev->func * MAX_CQ);
464         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
465         if (status)
466                 netif_err(qdev, ifup, qdev->ndev,
467                           "Failed to init mac address.\n");
468         return status;
469 }
470
471 void ql_link_on(struct ql_adapter *qdev)
472 {
473         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
474         netif_carrier_on(qdev->ndev);
475         ql_set_mac_addr(qdev, 1);
476 }
477
478 void ql_link_off(struct ql_adapter *qdev)
479 {
480         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
481         netif_carrier_off(qdev->ndev);
482         ql_set_mac_addr(qdev, 0);
483 }
484
485 /* Get a specific frame routing value from the CAM.
486  * Used for debug and reg dump.
487  */
488 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
489 {
490         int status = 0;
491
492         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
493         if (status)
494                 goto exit;
495
496         ql_write32(qdev, RT_IDX,
497                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
498         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
499         if (status)
500                 goto exit;
501         *value = ql_read32(qdev, RT_DATA);
502 exit:
503         return status;
504 }
505
506 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
507  * to route different frame types to various inbound queues.  We send broadcast/
508  * multicast/error frames to the default queue for slow handling,
509  * and CAM hit/RSS frames to the fast handling queues.
510  */
511 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
512                               int enable)
513 {
514         int status = -EINVAL; /* Return error if no mask match. */
515         u32 value = 0;
516
517         switch (mask) {
518         case RT_IDX_CAM_HIT:
519                 {
520                         value = RT_IDX_DST_CAM_Q |      /* dest */
521                             RT_IDX_TYPE_NICQ |  /* type */
522                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
523                         break;
524                 }
525         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
526                 {
527                         value = RT_IDX_DST_DFLT_Q |     /* dest */
528                             RT_IDX_TYPE_NICQ |  /* type */
529                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
530                         break;
531                 }
532         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
533                 {
534                         value = RT_IDX_DST_DFLT_Q |     /* dest */
535                             RT_IDX_TYPE_NICQ |  /* type */
536                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
537                         break;
538                 }
539         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
540                 {
541                         value = RT_IDX_DST_DFLT_Q | /* dest */
542                                 RT_IDX_TYPE_NICQ | /* type */
543                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
544                                 RT_IDX_IDX_SHIFT); /* index */
545                         break;
546                 }
547         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
548                 {
549                         value = RT_IDX_DST_DFLT_Q | /* dest */
550                                 RT_IDX_TYPE_NICQ | /* type */
551                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
552                                 RT_IDX_IDX_SHIFT); /* index */
553                         break;
554                 }
555         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
556                 {
557                         value = RT_IDX_DST_DFLT_Q |     /* dest */
558                             RT_IDX_TYPE_NICQ |  /* type */
559                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
560                         break;
561                 }
562         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
563                 {
564                         value = RT_IDX_DST_DFLT_Q |     /* dest */
565                             RT_IDX_TYPE_NICQ |  /* type */
566                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
567                         break;
568                 }
569         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
570                 {
571                         value = RT_IDX_DST_DFLT_Q |     /* dest */
572                             RT_IDX_TYPE_NICQ |  /* type */
573                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
574                         break;
575                 }
576         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
577                 {
578                         value = RT_IDX_DST_RSS |        /* dest */
579                             RT_IDX_TYPE_NICQ |  /* type */
580                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
581                         break;
582                 }
583         case 0:         /* Clear the E-bit on an entry. */
584                 {
585                         value = RT_IDX_DST_DFLT_Q |     /* dest */
586                             RT_IDX_TYPE_NICQ |  /* type */
587                             (index << RT_IDX_IDX_SHIFT);/* index */
588                         break;
589                 }
590         default:
591                 netif_err(qdev, ifup, qdev->ndev,
592                           "Mask type %d not yet supported.\n", mask);
593                 status = -EPERM;
594                 goto exit;
595         }
596
597         if (value) {
598                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
599                 if (status)
600                         goto exit;
601                 value |= (enable ? RT_IDX_E : 0);
602                 ql_write32(qdev, RT_IDX, value);
603                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
604         }
605 exit:
606         return status;
607 }
608
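/* INTR_EN writes use the chip's mask-and-value layout: the upper 16 bits
 * select which of the lower enable bits take effect.
 */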
609 static void ql_enable_interrupts(struct ql_adapter *qdev)
610 {
611         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
612 }
613
614 static void ql_disable_interrupts(struct ql_adapter *qdev)
615 {
616         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
617 }
618
619 static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
620 {
621         struct intr_context *ctx = &qdev->intr_context[intr];
622
623         ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
624 }
625
626 static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
627 {
628         struct intr_context *ctx = &qdev->intr_context[intr];
629
630         ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
631 }
632
633 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
634 {
635         int i;
636
637         for (i = 0; i < qdev->intr_count; i++)
638                 ql_enable_completion_interrupt(qdev, i);
639 }
640
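/* Validate a flash image: check the 4-character signature string and verify
 * that the 16-bit word checksum over 'size' words adds up to zero.  Returns
 * non-zero on failure.
 */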
641 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
642 {
643         int status, i;
644         u16 csum = 0;
645         __le16 *flash = (__le16 *)&qdev->flash;
646
647         status = strncmp((char *)&qdev->flash, str, 4);
648         if (status) {
649                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
650                 return  status;
651         }
652
653         for (i = 0; i < size; i++)
654                 csum += le16_to_cpu(*flash++);
655
656         if (csum)
657                 netif_err(qdev, ifup, qdev->ndev,
658                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
659
660         return csum;
661 }
662
663 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
664 {
665         int status = 0;
666         /* wait for reg to come ready */
667         status = ql_wait_reg_rdy(qdev,
668                                  FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
669         if (status)
670                 goto exit;
671         /* set up for reg read */
672         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
673         /* wait for reg to come ready */
674         status = ql_wait_reg_rdy(qdev,
675                                  FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
676         if (status)
677                 goto exit;
678         /* This data is stored on flash as an array of
679          * __le32.  Since ql_read32() returns cpu endian
680          * we need to swap it back.
681          */
682         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
683 exit:
684         return status;
685 }
686
687 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
688 {
689         u32 i, size;
690         int status;
691         __le32 *p = (__le32 *)&qdev->flash;
692         u32 offset;
693         u8 mac_addr[6];
694
695         /* Get flash offset for function and adjust
696          * for dword access.
697          */
698         if (!qdev->port)
699                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
700         else
701                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
702
703         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
704                 return -ETIMEDOUT;
705
706         size = sizeof(struct flash_params_8000) / sizeof(u32);
707         for (i = 0; i < size; i++, p++) {
708                 status = ql_read_flash_word(qdev, i+offset, p);
709                 if (status) {
710                         netif_err(qdev, ifup, qdev->ndev,
711                                   "Error reading flash.\n");
712                         goto exit;
713                 }
714         }
715
716         status = ql_validate_flash(qdev,
717                                    sizeof(struct flash_params_8000) /
718                                    sizeof(u16),
719                                    "8000");
720         if (status) {
721                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
722                 status = -EINVAL;
723                 goto exit;
724         }
725
726         /* Extract either manufacturer or BOFM modified
727          * MAC address.
728          */
729         if (qdev->flash.flash_params_8000.data_type1 == 2)
730                 memcpy(mac_addr,
731                        qdev->flash.flash_params_8000.mac_addr1,
732                        qdev->ndev->addr_len);
733         else
734                 memcpy(mac_addr,
735                        qdev->flash.flash_params_8000.mac_addr,
736                        qdev->ndev->addr_len);
737
738         if (!is_valid_ether_addr(mac_addr)) {
739                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
740                 status = -EINVAL;
741                 goto exit;
742         }
743
744         memcpy(qdev->ndev->dev_addr,
745                mac_addr,
746                qdev->ndev->addr_len);
747
748 exit:
749         ql_sem_unlock(qdev, SEM_FLASH_MASK);
750         return status;
751 }
752
753 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
754 {
755         int i;
756         int status;
757         __le32 *p = (__le32 *)&qdev->flash;
758         u32 offset = 0;
759         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
760
761         /* Second function's parameters follow the first
762          * function's.
763          */
764         if (qdev->port)
765                 offset = size;
766
767         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
768                 return -ETIMEDOUT;
769
770         for (i = 0; i < size; i++, p++) {
771                 status = ql_read_flash_word(qdev, i+offset, p);
772                 if (status) {
773                         netif_err(qdev, ifup, qdev->ndev,
774                                   "Error reading flash.\n");
775                         goto exit;
776                 }
777
778         }
779
780         status = ql_validate_flash(qdev,
781                                    sizeof(struct flash_params_8012) /
782                                    sizeof(u16),
783                                    "8012");
784         if (status) {
785                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
786                 status = -EINVAL;
787                 goto exit;
788         }
789
790         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
791                 status = -EINVAL;
792                 goto exit;
793         }
794
795         memcpy(qdev->ndev->dev_addr,
796                qdev->flash.flash_params_8012.mac_addr,
797                qdev->ndev->addr_len);
798
799 exit:
800         ql_sem_unlock(qdev, SEM_FLASH_MASK);
801         return status;
802 }
803
804 /* xgmac registers are located behind the xgmac_addr and xgmac_data
805  * register pair.  Each read/write requires us to wait for the ready
806  * bit before reading/writing the data.
807  */
808 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
809 {
810         int status;
811         /* wait for reg to come ready */
812         status = ql_wait_reg_rdy(qdev,
813                                  XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
814         if (status)
815                 return status;
816         /* write the data to the data reg */
817         ql_write32(qdev, XGMAC_DATA, data);
818         /* trigger the write */
819         ql_write32(qdev, XGMAC_ADDR, reg);
820         return status;
821 }
822
823 /* xgmac registers are located behind the xgmac_addr and xgmac_data
824  * register pair.  Each read/write requires us to wait for the ready
825  * bit before reading/writing the data.
826  */
827 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
828 {
829         int status = 0;
830         /* wait for reg to come ready */
831         status = ql_wait_reg_rdy(qdev,
832                                  XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
833         if (status)
834                 goto exit;
835         /* set up for reg read */
836         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
837         /* wait for reg to come ready */
838         status = ql_wait_reg_rdy(qdev,
839                                  XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
840         if (status)
841                 goto exit;
842         /* get the data */
843         *data = ql_read32(qdev, XGMAC_DATA);
844 exit:
845         return status;
846 }
847
848 /* This is used for reading the 64-bit statistics regs. */
849 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
850 {
851         int status = 0;
852         u32 hi = 0;
853         u32 lo = 0;
854
855         status = ql_read_xgmac_reg(qdev, reg, &lo);
856         if (status)
857                 goto exit;
858
859         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
860         if (status)
861                 goto exit;
862
863         *data = (u64) lo | ((u64) hi << 32);
864
865 exit:
866         return status;
867 }
868
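/* Fetch MPI firmware information (used for the driver banner and ethtool)
 * and schedule the port configuration worker that sets the TX/RX frame sizes.
 */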
869 static int ql_8000_port_initialize(struct ql_adapter *qdev)
870 {
871         int status;
872         /*
873          * Get MPI firmware version for driver banner
874  * and ethtool info.
875          */
876         status = ql_mb_about_fw(qdev);
877         if (status)
878                 goto exit;
879         status = ql_mb_get_fw_state(qdev);
880         if (status)
881                 goto exit;
882         /* Wake up a worker to get/set the TX/RX frame sizes. */
883         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
884 exit:
885         return status;
886 }
887
888 /* Take the MAC Core out of reset.
889  * Enable statistics counting.
890  * Take the transmitter/receiver out of reset.
891  * This functionality may be done in the MPI firmware at a
892  * later date.
893  */
894 static int ql_8012_port_initialize(struct ql_adapter *qdev)
895 {
896         int status = 0;
897         u32 data;
898
899         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
900                 /* Another function has the semaphore, so
901                  * wait for the port init bit to come ready.
902                  */
903                 netif_info(qdev, link, qdev->ndev,
904                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
905                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
906                 if (status) {
907                         netif_crit(qdev, link, qdev->ndev,
908                                    "Port initialize timed out.\n");
909                 }
910                 return status;
911         }
912
913         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
914         /* Set the core reset. */
915         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
916         if (status)
917                 goto end;
918         data |= GLOBAL_CFG_RESET;
919         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
920         if (status)
921                 goto end;
922
923         /* Clear the core reset and turn on jumbo for receiver. */
924         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
925         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
926         data |= GLOBAL_CFG_TX_STAT_EN;
927         data |= GLOBAL_CFG_RX_STAT_EN;
928         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
929         if (status)
930                 goto end;
931
932         /* Enable the transmitter and clear its reset. */
933         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
934         if (status)
935                 goto end;
936         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
937         data |= TX_CFG_EN;      /* Enable the transmitter. */
938         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
939         if (status)
940                 goto end;
941
942         /* Enable the receiver and clear its reset. */
943         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
944         if (status)
945                 goto end;
946         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
947         data |= RX_CFG_EN;      /* Enable the receiver. */
948         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
949         if (status)
950                 goto end;
951
952         /* Turn on jumbo. */
953         status =
954             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
955         if (status)
956                 goto end;
957         status =
958             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
959         if (status)
960                 goto end;
961
962         /* Signal to the world that the port is enabled.        */
963         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
964 end:
965         ql_sem_unlock(qdev, qdev->xg_sem_mask);
966         return status;
967 }
968
969 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
970 {
971         return PAGE_SIZE << qdev->lbq_buf_order;
972 }
973
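/* Return the buffer descriptor at next_to_clean and advance the index,
 * wrapping at the end of the queue.
 */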
974 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
975 {
976         struct qlge_bq_desc *bq_desc;
977
978         bq_desc = &bq->queue[bq->next_to_clean];
979         bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
980
981         return bq_desc;
982 }
983
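/* Return the current large-buffer chunk.  The master page is unmapped once
 * its last chunk has been consumed.
 */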
984 static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
985                                                struct rx_ring *rx_ring)
986 {
987         struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
988
989         pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
990                                     qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
991
992         if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
993             ql_lbq_block_size(qdev)) {
994                 /* last chunk of the master page */
995                 pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
996                                ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
997         }
998
999         return lbq_desc;
1000 }
1001
1002 /* Update an rx ring index. */
1003 static void ql_update_cq(struct rx_ring *rx_ring)
1004 {
1005         rx_ring->cnsmr_idx++;
1006         rx_ring->curr_entry++;
1007         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1008                 rx_ring->cnsmr_idx = 0;
1009                 rx_ring->curr_entry = rx_ring->cq_base;
1010         }
1011 }
1012
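/* Tell the chip how far we have consumed this completion queue by writing
 * the consumer index to its doorbell register.
 */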
1013 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1014 {
1015         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1016 }
1017
1018 static const char * const bq_type_name[] = {
1019         [QLGE_SB] = "sbq",
1020         [QLGE_LB] = "lbq",
1021 };
1022
1023 /* return 0 or negative error */
1024 static int qlge_refill_sb(struct rx_ring *rx_ring,
1025                           struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1026 {
1027         struct ql_adapter *qdev = rx_ring->qdev;
1028         struct sk_buff *skb;
1029
1030         if (sbq_desc->p.skb)
1031                 return 0;
1032
1033         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1034                      "ring %u sbq: getting new skb for index %d.\n",
1035                      rx_ring->cq_id, sbq_desc->index);
1036
1037         skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1038         if (!skb)
1039                 return -ENOMEM;
1040         skb_reserve(skb, QLGE_SB_PAD);
1041
1042         sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
1043                                             SMALL_BUF_MAP_SIZE,
1044                                             PCI_DMA_FROMDEVICE);
1045         if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
1046                 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1047                 dev_kfree_skb_any(skb);
1048                 return -EIO;
1049         }
1050         *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1051
1052         sbq_desc->p.skb = skb;
1053         return 0;
1054 }
1055
1056 /* return 0 or negative error */
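/* Large buffers are carved out of a single higher-order "master" page;
 * every chunk handed to the hardware holds its own reference on that page.
 */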
1057 static int qlge_refill_lb(struct rx_ring *rx_ring,
1058                           struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1059 {
1060         struct ql_adapter *qdev = rx_ring->qdev;
1061         struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1062
1063         if (!master_chunk->page) {
1064                 struct page *page;
1065                 dma_addr_t dma_addr;
1066
1067                 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1068                 if (unlikely(!page))
1069                         return -ENOMEM;
1070                 dma_addr = pci_map_page(qdev->pdev, page, 0,
1071                                         ql_lbq_block_size(qdev),
1072                                         PCI_DMA_FROMDEVICE);
1073                 if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
1074                         __free_pages(page, qdev->lbq_buf_order);
1075                         netif_err(qdev, drv, qdev->ndev,
1076                                   "PCI mapping failed.\n");
1077                         return -EIO;
1078                 }
1079                 master_chunk->page = page;
1080                 master_chunk->va = page_address(page);
1081                 master_chunk->offset = 0;
1082                 rx_ring->chunk_dma_addr = dma_addr;
1083         }
1084
1085         lbq_desc->p.pg_chunk = *master_chunk;
1086         lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1087         *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1088                                          lbq_desc->p.pg_chunk.offset);
1089
1090         /* Adjust the master page chunk for next
1091          * buffer get.
1092          */
1093         master_chunk->offset += qdev->lbq_buf_size;
1094         if (master_chunk->offset == ql_lbq_block_size(qdev)) {
1095                 master_chunk->page = NULL;
1096         } else {
1097                 master_chunk->va += qdev->lbq_buf_size;
1098                 get_page(master_chunk->page);
1099         }
1100
1101         return 0;
1102 }
1103
1104 /* return 0 or negative error */
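/* The producer index doorbell is only written once the refill crosses a
 * QLGE_BQ_ALIGN boundary; partial progress is kept in next_to_use.
 */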
1105 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1106 {
1107         struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1108         struct ql_adapter *qdev = rx_ring->qdev;
1109         struct qlge_bq_desc *bq_desc;
1110         int refill_count;
1111         int retval;
1112         int i;
1113
1114         refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1115                                     bq->next_to_use);
1116         if (!refill_count)
1117                 return 0;
1118
1119         i = bq->next_to_use;
1120         bq_desc = &bq->queue[i];
1121         i -= QLGE_BQ_LEN;
1122         do {
1123                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1124                              "ring %u %s: try cleaning idx %d\n",
1125                              rx_ring->cq_id, bq_type_name[bq->type], i);
1126
1127                 if (bq->type == QLGE_SB)
1128                         retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1129                 else
1130                         retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1131                 if (retval < 0) {
1132                         netif_err(qdev, ifup, qdev->ndev,
1133                                   "ring %u %s: Could not get a page chunk, idx %d\n",
1134                                   rx_ring->cq_id, bq_type_name[bq->type], i);
1135                         break;
1136                 }
1137
1138                 bq_desc++;
1139                 i++;
1140                 if (unlikely(!i)) {
1141                         bq_desc = &bq->queue[0];
1142                         i -= QLGE_BQ_LEN;
1143                 }
1144                 refill_count--;
1145         } while (refill_count);
1146         i += QLGE_BQ_LEN;
1147
1148         if (bq->next_to_use != i) {
1149                 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1150                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1151                                      "ring %u %s: updating prod idx = %d.\n",
1152                                      rx_ring->cq_id, bq_type_name[bq->type],
1153                                      i);
1154                         ql_write_db_reg(i, bq->prod_idx_db_reg);
1155                 }
1156                 bq->next_to_use = i;
1157         }
1158
1159         return retval;
1160 }
1161
1162 static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1163                                     unsigned long delay)
1164 {
1165         bool sbq_fail, lbq_fail;
1166
1167         sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1168         lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1169
1170         /* Minimum number of buffers needed to be able to receive at least one
1171          * frame of any format:
1172          * sbq: 1 for header + 1 for data
1173          * lbq: mtu 9000 / lb size
1174          * Below this, the queue might stall.
1175          */
1176         if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1177             (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1178              DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1179                 /* Allocations can take a long time in certain cases (ex.
1180                  * reclaim). Therefore, use a workqueue for long-running
1181                  * work items.
1182                  */
1183                 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1184                                       &rx_ring->refill_work, delay);
1185 }
1186
1187 static void qlge_slow_refill(struct work_struct *work)
1188 {
1189         struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1190                                                refill_work.work);
1191         struct napi_struct *napi = &rx_ring->napi;
1192
1193         napi_disable(napi);
1194         ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1195         napi_enable(napi);
1196
1197         local_bh_disable();
1198         /* napi_disable() might have prevented incomplete napi work from being
1199          * rescheduled.
1200          */
1201         napi_schedule(napi);
1202         /* trigger softirq processing */
1203         local_bh_enable();
1204 }
1205
1206 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1207  * fails at some stage, or from the interrupt when a tx completes.
1208  */
1209 static void ql_unmap_send(struct ql_adapter *qdev,
1210                           struct tx_ring_desc *tx_ring_desc, int mapped)
1211 {
1212         int i;
1213         for (i = 0; i < mapped; i++) {
1214                 if (i == 0 || (i == 7 && mapped > 7)) {
1215                         /*
1216                          * Unmap the skb->data area, or the
1217                          * external sglist (AKA the Outbound
1218                          * Address List (OAL)).
1219                          * If it's the zeroth element, then it's
1220                          * the skb->data area.  If it's the 7th
1221                          * element and there are more than 6 frags,
1222                          * then it's an OAL.
1223                          */
1224                         if (i == 7) {
1225                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1226                                              qdev->ndev,
1227                                              "unmapping OAL area.\n");
1228                         }
1229                         pci_unmap_single(qdev->pdev,
1230                                          dma_unmap_addr(&tx_ring_desc->map[i],
1231                                                         mapaddr),
1232                                          dma_unmap_len(&tx_ring_desc->map[i],
1233                                                        maplen),
1234                                          PCI_DMA_TODEVICE);
1235                 } else {
1236                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1237                                      "unmapping frag %d.\n", i);
1238                         pci_unmap_page(qdev->pdev,
1239                                        dma_unmap_addr(&tx_ring_desc->map[i],
1240                                                       mapaddr),
1241                                        dma_unmap_len(&tx_ring_desc->map[i],
1242                                                      maplen), PCI_DMA_TODEVICE);
1243                 }
1244         }
1245
1246 }
1247
1248 /* Map the buffers for this transmit.  This will return
1249  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1250  */
1251 static int ql_map_send(struct ql_adapter *qdev,
1252                        struct ob_mac_iocb_req *mac_iocb_ptr,
1253                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1254 {
1255         int len = skb_headlen(skb);
1256         dma_addr_t map;
1257         int frag_idx, err, map_idx = 0;
1258         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1259         int frag_cnt = skb_shinfo(skb)->nr_frags;
1260
1261         if (frag_cnt) {
1262                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1263                              "frag_cnt = %d.\n", frag_cnt);
1264         }
1265         /*
1266          * Map the skb buffer first.
1267          */
1268         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1269
1270         err = pci_dma_mapping_error(qdev->pdev, map);
1271         if (err) {
1272                 netif_err(qdev, tx_queued, qdev->ndev,
1273                           "PCI mapping failed with error: %d\n", err);
1274
1275                 return NETDEV_TX_BUSY;
1276         }
1277
1278         tbd->len = cpu_to_le32(len);
1279         tbd->addr = cpu_to_le64(map);
1280         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1281         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1282         map_idx++;
1283
1284         /*
1285          * This loop fills the remainder of the 8 address descriptors
1286          * in the IOCB.  If there are more than 7 fragments, then the
1287          * eighth address desc will point to an external list (OAL).
1288          * When this happens, the remainder of the frags will be stored
1289          * in this list.
1290          */
1291         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1292                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1293                 tbd++;
1294                 if (frag_idx == 6 && frag_cnt > 7) {
1295                         /* Let's tack on an sglist.
1296                          * Our control block will now
1297                          * look like this:
1298                          * iocb->seg[0] = skb->data
1299                          * iocb->seg[1] = frag[0]
1300                          * iocb->seg[2] = frag[1]
1301                          * iocb->seg[3] = frag[2]
1302                          * iocb->seg[4] = frag[3]
1303                          * iocb->seg[5] = frag[4]
1304                          * iocb->seg[6] = frag[5]
1305                          * iocb->seg[7] = ptr to OAL (external sglist)
1306                          * oal->seg[0] = frag[6]
1307                          * oal->seg[1] = frag[7]
1308                          * oal->seg[2] = frag[8]
1309                          * oal->seg[3] = frag[9]
1310                          * oal->seg[4] = frag[10]
1311                          *      etc...
1312                          */
1313                         /* Tack on the OAL in the eighth segment of IOCB. */
1314                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1315                                              sizeof(struct oal),
1316                                              PCI_DMA_TODEVICE);
1317                         err = pci_dma_mapping_error(qdev->pdev, map);
1318                         if (err) {
1319                                 netif_err(qdev, tx_queued, qdev->ndev,
1320                                           "PCI mapping outbound address list with error: %d\n",
1321                                           err);
1322                                 goto map_error;
1323                         }
1324
1325                         tbd->addr = cpu_to_le64(map);
1326                         /*
1327                          * The length is the number of fragments
1328                          * that remain to be mapped times the length
1329                          * of our sglist (OAL).
1330                          */
1331                         tbd->len =
1332                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1333                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1334                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1335                                            map);
1336                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1337                                           sizeof(struct oal));
1338                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1339                         map_idx++;
1340                 }
1341
1342                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1343                                        DMA_TO_DEVICE);
1344
1345                 err = dma_mapping_error(&qdev->pdev->dev, map);
1346                 if (err) {
1347                         netif_err(qdev, tx_queued, qdev->ndev,
1348                                   "PCI mapping frags failed with error: %d.\n",
1349                                   err);
1350                         goto map_error;
1351                 }
1352
1353                 tbd->addr = cpu_to_le64(map);
1354                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1355                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1356                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1357                                   skb_frag_size(frag));
1358
1359         }
1360         /* Save the number of segments we've mapped. */
1361         tx_ring_desc->map_cnt = map_idx;
1362         /* Terminate the last segment. */
1363         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1364         return NETDEV_TX_OK;
1365
1366 map_error:
1367         /*
1368          * If the first frag mapping failed, then map_idx will be one,
1369          * which unmaps only the skb->data area.  Otherwise we pass in
1370          * the number of segments that mapped successfully so they can
1371          * be unmapped.
1372          */
1373         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1374         return NETDEV_TX_BUSY;
1375 }
1376
1377 /* Categorizing receive firmware frame errors */
1378 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1379                                  struct rx_ring *rx_ring)
1380 {
1381         struct nic_stats *stats = &qdev->nic_stats;
1382
1383         stats->rx_err_count++;
1384         rx_ring->rx_errors++;
1385
1386         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1387         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1388                 stats->rx_code_err++;
1389                 break;
1390         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1391                 stats->rx_oversize_err++;
1392                 break;
1393         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1394                 stats->rx_undersize_err++;
1395                 break;
1396         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1397                 stats->rx_preamble_err++;
1398                 break;
1399         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1400                 stats->rx_frame_len_err++;
1401                 break;
1402         case IB_MAC_IOCB_RSP_ERR_CRC:
1403                 stats->rx_crc_err++;
                     break;
1404         default:
1405                 break;
1406         }
1407 }
1408
1409 /**
1410  * ql_update_mac_hdr_len - helper routine to update the mac header length
1411  * based on vlan tags if present
      * @qdev: adapter context
      * @ib_mac_rsp: inbound completion describing the received frame
      * @page: start of the received frame data
      * @len: MAC header length to adjust
1412  */
1413 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1414                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1415                                   void *page, size_t *len)
1416 {
1417         __be16 *tags;
1418
1419         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1420                 return;
1421         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1422                 tags = (__be16 *)page;
1423                 /* Look for stacked vlan tags in the ethertype fields */
1424                 if (tags[6] == htons(ETH_P_8021Q) &&
1425                     tags[8] == htons(ETH_P_8021Q))
1426                         *len += 2 * VLAN_HLEN;
1427                 else
1428                         *len += VLAN_HLEN;
1429         }
1430 }
1431
1432 /* Process an inbound completion from an rx ring: a checksummed TCP frame
      * held in a single large-buffer page chunk, handed to GRO as a frag.
      */
1433 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1434                                        struct rx_ring *rx_ring,
1435                                        struct ib_mac_iocb_rsp *ib_mac_rsp,
1436                                        u32 length, u16 vlan_id)
1437 {
1438         struct sk_buff *skb;
1439         struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1440         struct napi_struct *napi = &rx_ring->napi;
1441
1442         /* Frame error, so drop the packet. */
1443         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1444                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1445                 put_page(lbq_desc->p.pg_chunk.page);
1446                 return;
1447         }
1448         napi->dev = qdev->ndev;
1449
1450         skb = napi_get_frags(napi);
1451         if (!skb) {
1452                 netif_err(qdev, drv, qdev->ndev,
1453                           "Couldn't get an skb, exiting.\n");
1454                 rx_ring->rx_dropped++;
1455                 put_page(lbq_desc->p.pg_chunk.page);
1456                 return;
1457         }
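             /*
              * Attach the page chunk to the per-NAPI skb as an extra frag
              * and grow len/data_len/truesize to match; napi_gro_frags()
              * below takes care of the Ethernet header and feeds the frame
              * to GRO.
              */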
1458         prefetch(lbq_desc->p.pg_chunk.va);
1459         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1460                              lbq_desc->p.pg_chunk.page,
1461                              lbq_desc->p.pg_chunk.offset,
1462                              length);
1463
1464         skb->len += length;
1465         skb->data_len += length;
1466         skb->truesize += length;
1467         skb_shinfo(skb)->nr_frags++;
1468
1469         rx_ring->rx_packets++;
1470         rx_ring->rx_bytes += length;
1471         skb->ip_summed = CHECKSUM_UNNECESSARY;
1472         skb_record_rx_queue(skb, rx_ring->cq_id);
1473         if (vlan_id != 0xffff)
1474                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1475         napi_gro_frags(napi);
1476 }
1477
1478 /* Process an inbound completion from an rx ring: a non-TCP frame that fits
      * in a single large-buffer page chunk; build an skb around the page.
      */
1479 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1480                                    struct rx_ring *rx_ring,
1481                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1482                                    u32 length, u16 vlan_id)
1483 {
1484         struct net_device *ndev = qdev->ndev;
1485         struct sk_buff *skb = NULL;
1486         void *addr;
1487         struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1488         struct napi_struct *napi = &rx_ring->napi;
1489         size_t hlen = ETH_HLEN;
1490
1491         skb = netdev_alloc_skb(ndev, length);
1492         if (!skb) {
1493                 rx_ring->rx_dropped++;
1494                 put_page(lbq_desc->p.pg_chunk.page);
1495                 return;
1496         }
1497
1498         addr = lbq_desc->p.pg_chunk.va;
1499         prefetch(addr);
1500
1501         /* Frame error, so drop the packet. */
1502         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1504                 goto err_out;
1505         }
1506
1507         /* Update the MAC header length. */
1508         ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1509
1510         /* The max framesize filter on this chip is set higher than
1511          * MTU since FCoE uses 2k frames.
1512          */
1513         if (length > ndev->mtu + hlen) {
1514                 netif_err(qdev, drv, qdev->ndev,
1515                           "Frame too long, dropping.\n");
1516                 rx_ring->rx_dropped++;
1517                 goto err_out;
1518         }
1519         skb_put_data(skb, addr, hlen);
1520         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1521                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1522                      length);
1523         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1524                            lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1525         skb->len += length - hlen;
1526         skb->data_len += length - hlen;
1527         skb->truesize += length - hlen;
1528
1529         rx_ring->rx_packets++;
1530         rx_ring->rx_bytes += skb->len;
1531         skb->protocol = eth_type_trans(skb, ndev);
1532         skb_checksum_none_assert(skb);
1533
1534         if ((ndev->features & NETIF_F_RXCSUM) &&
1535             !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1536                 /* TCP frame. */
1537                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1538                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1539                                      "TCP checksum done!\n");
1540                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1541                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1542                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1543                         /* Unfragmented ipv4 UDP frame. */
1544                         struct iphdr *iph =
1545                                 (struct iphdr *)((u8 *)addr + hlen);
1546                         if (!(iph->frag_off &
1547                                 htons(IP_MF|IP_OFFSET))) {
1548                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1549                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1550                                              qdev->ndev,
1551                                              "UDP checksum done!\n");
1552                         }
1553                 }
1554         }
1555
1556         skb_record_rx_queue(skb, rx_ring->cq_id);
1557         if (vlan_id != 0xffff)
1558                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1559         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1560                 napi_gro_receive(napi, skb);
1561         else
1562                 netif_receive_skb(skb);
1563         return;
1564 err_out:
1565         dev_kfree_skb_any(skb);
1566         put_page(lbq_desc->p.pg_chunk.page);
1567 }
1568
1569 /* Process an inbound completion from an rx ring: the whole frame fit in a
      * single small buffer; copy it into a freshly allocated skb.
      */
1570 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1571                                   struct rx_ring *rx_ring,
1572                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1573                                   u32 length, u16 vlan_id)
1574 {
1575         struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1576         struct net_device *ndev = qdev->ndev;
1577         struct sk_buff *skb, *new_skb;
1578
1579         skb = sbq_desc->p.skb;
1580         /* Allocate new_skb and copy */
1581         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1582         if (!new_skb) {
1583                 rx_ring->rx_dropped++;
1584                 return;
1585         }
1586         skb_reserve(new_skb, NET_IP_ALIGN);
1587
1588         pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
1589                                     SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1590
1591         skb_put_data(new_skb, skb->data, length);
1592
1593         skb = new_skb;
1594
1595         /* Frame error, so drop the packet. */
1596         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1597                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1598                 dev_kfree_skb_any(skb);
1599                 return;
1600         }
1601
1602         /* loopback self test for ethtool */
1603         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1604                 ql_check_lb_frame(qdev, skb);
1605                 dev_kfree_skb_any(skb);
1606                 return;
1607         }
1608
1609         /* The max framesize filter on this chip is set higher than
1610          * MTU since FCoE uses 2k frames.
1611          */
1612         if (skb->len > ndev->mtu + ETH_HLEN) {
1613                 dev_kfree_skb_any(skb);
1614                 rx_ring->rx_dropped++;
1615                 return;
1616         }
1617
1618         prefetch(skb->data);
1619         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1620                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1621                              "%s Multicast.\n",
1622                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1623                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1624                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1625                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1626                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1627                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1628         }
1629         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1630                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1631                              "Promiscuous Packet.\n");
1632
1633         rx_ring->rx_packets++;
1634         rx_ring->rx_bytes += skb->len;
1635         skb->protocol = eth_type_trans(skb, ndev);
1636         skb_checksum_none_assert(skb);
1637
1638         /* If rx checksum is on, and there are no
1639          * csum or frame errors.
1640          */
1641         if ((ndev->features & NETIF_F_RXCSUM) &&
1642             !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1643                 /* TCP frame. */
1644                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1645                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646                                      "TCP checksum done!\n");
1647                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1648                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1649                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1650                         /* Unfragmented ipv4 UDP frame. */
1651                         struct iphdr *iph = (struct iphdr *) skb->data;
1652                         if (!(iph->frag_off &
1653                                 htons(IP_MF|IP_OFFSET))) {
1654                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1655                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1656                                              qdev->ndev,
1657                                              "UDP checksum done!\n");
1658                         }
1659                 }
1660         }
1661
1662         skb_record_rx_queue(skb, rx_ring->cq_id);
1663         if (vlan_id != 0xffff)
1664                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1665         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1666                 napi_gro_receive(&rx_ring->napi, skb);
1667         else
1668                 netif_receive_skb(skb);
1669 }
1670
1671 static void ql_realign_skb(struct sk_buff *skb, int len)
1672 {
1673         void *temp_addr = skb->data;
1674
1675         /* Undo the skb_reserve(skb,32) we did before
1676          * giving to hardware, and realign data on
1677          * a 2-byte boundary.
1678          */
1679         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1680         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1681         memmove(skb->data, temp_addr, len);
1682 }
1683
1684 /*
1685  * This function builds an skb for the given inbound
1686  * completion.  It will be rewritten for readability in the near
1687  * future, but for now it works well.
1688  */
1689 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1690                                        struct rx_ring *rx_ring,
1691                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1692 {
1693         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1694         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1695         struct qlge_bq_desc *lbq_desc, *sbq_desc;
1696         struct sk_buff *skb = NULL;
1697         size_t hlen = ETH_HLEN;
1698
1699         /*
1700          * Handle the header buffer if present.
1701          */
1702         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1703             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1704                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705                              "Header of %d bytes in small buffer.\n", hdr_len);
1706                 /*
1707                  * Headers fit nicely into a small buffer.
1708                  */
1709                 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1710                 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1711                                  SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1712                 skb = sbq_desc->p.skb;
1713                 ql_realign_skb(skb, hdr_len);
1714                 skb_put(skb, hdr_len);
1715                 sbq_desc->p.skb = NULL;
1716         }
1717
1718         /*
1719          * Handle the data buffer(s).
1720          */
1721         if (unlikely(!length)) {        /* Is there data too? */
1722                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1723                              "No Data buffer in this packet.\n");
1724                 return skb;
1725         }
1726
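             /*
              * The completion tells us where the chip put the frame: data
              * entirely in a small buffer (DS), in a single large buffer
              * (DL), or in a chain of large buffers described by the sg
              * list in a small buffer.  Build the skb accordingly.
              */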
1727         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1728                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1729                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1730                                      "Headers in small, data of %d bytes in small, combine them.\n",
1731                                      length);
1732                         /*
1733                          * Data is less than small buffer size so it's
1734                          * stuffed in a small buffer.
1735                          * For this case we append the data
1736                          * from the "data" small buffer to the "header" small
1737                          * buffer.
1738                          */
1739                         sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1740                         pci_dma_sync_single_for_cpu(qdev->pdev,
1741                                                     sbq_desc->dma_addr,
1742                                                     SMALL_BUF_MAP_SIZE,
1743                                                     PCI_DMA_FROMDEVICE);
1744                         skb_put_data(skb, sbq_desc->p.skb->data, length);
1745                 } else {
1746                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1747                                      "%d bytes in a single small buffer.\n",
1748                                      length);
1749                         sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1750                         skb = sbq_desc->p.skb;
1751                         ql_realign_skb(skb, length);
1752                         skb_put(skb, length);
1753                         pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1754                                          SMALL_BUF_MAP_SIZE,
1755                                          PCI_DMA_FROMDEVICE);
1756                         sbq_desc->p.skb = NULL;
1757                 }
1758         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1759                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1760                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1761                                      "Header in small, %d bytes in large. Chain large to small!\n",
1762                                      length);
1763                         /*
1764                          * The data is in a single large buffer.  We
1765                          * chain it to the header buffer's skb and let
1766                          * it rip.
1767                          */
1768                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1769                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1770                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1771                                      lbq_desc->p.pg_chunk.offset, length);
1772                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1773                                            lbq_desc->p.pg_chunk.offset, length);
1774                         skb->len += length;
1775                         skb->data_len += length;
1776                         skb->truesize += length;
1777                 } else {
1778                         /*
1779                          * The headers and data are in a single large buffer. We
1780                          * copy it to a new skb and let it go. This can happen with
1781                          * jumbo mtu on a non-TCP/UDP frame.
1782                          */
1783                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1784                         skb = netdev_alloc_skb(qdev->ndev, length);
1785                         if (!skb) {
1786                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1787                                              "No skb available, drop the packet.\n");
1788                                 return NULL;
1789                         }
1790                         pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
1791                                        qdev->lbq_buf_size,
1792                                        PCI_DMA_FROMDEVICE);
1793                         skb_reserve(skb, NET_IP_ALIGN);
1794                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1796                                      length);
1797                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1798                                            lbq_desc->p.pg_chunk.offset,
1799                                            length);
1800                         skb->len += length;
1801                         skb->data_len += length;
1802                         skb->truesize += length;
1803                         ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1804                                               lbq_desc->p.pg_chunk.va,
1805                                               &hlen);
1806                         __pskb_pull_tail(skb, hlen);
1807                 }
1808         } else {
1809                 /*
1810                  * The data is in a chain of large buffers
1811                  * pointed to by a small buffer.  We loop
1812                  * through and chain them to our small header
1813                  * buffer's skb.
1814                  * frags:  There are 18 max frags and our small
1815                  *         buffer will hold 32 of them. The thing is,
1816                  *         we'll use 3 max for our 9000 byte jumbo
1817                  *         frames.  If the MTU goes up we could
1818                  *         eventually be in trouble.
1819                  */
1820                 int size, i = 0;
1821                 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1822                 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1823                                  SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1824                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1825                         /*
1826                          * This is a non-TCP/UDP IP frame, so
1827                          * the headers aren't split into a small
1828                          * buffer.  We have to use the small buffer
1829                          * that contains our sg list as our skb to
1830                          * send upstream.  Copy the sg list here to
1831                          * a local buffer and use it to find the
1832                          * pages to chain.
1833                          */
1834                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1835                                      "%d bytes of headers & data in chain of large.\n",
1836                                      length);
1837                         skb = sbq_desc->p.skb;
1838                         sbq_desc->p.skb = NULL;
1839                         skb_reserve(skb, NET_IP_ALIGN);
1840                 }
1841                 do {
1842                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1843                         size = min(length, qdev->lbq_buf_size);
1844
1845                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846                                      "Adding page %d to skb for %d bytes.\n",
1847                                      i, size);
1848                         skb_fill_page_desc(skb, i,
1849                                            lbq_desc->p.pg_chunk.page,
1850                                            lbq_desc->p.pg_chunk.offset, size);
1851                         skb->len += size;
1852                         skb->data_len += size;
1853                         skb->truesize += size;
1854                         length -= size;
1855                         i++;
1856                 } while (length > 0);
1857                 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1858                                       &hlen);
1859                 __pskb_pull_tail(skb, hlen);
1860         }
1861         return skb;
1862 }
1863
1864 /* Process an inbound completion from an rx ring: the header and data were
      * split into separate buffers, or the frame spans multiple buffers, so
      * build the skb with ql_build_rx_skb().
      */
1865 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1866                                          struct rx_ring *rx_ring,
1867                                          struct ib_mac_iocb_rsp *ib_mac_rsp,
1868                                          u16 vlan_id)
1869 {
1870         struct net_device *ndev = qdev->ndev;
1871         struct sk_buff *skb = NULL;
1872
1873         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1874
1875         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1876         if (unlikely(!skb)) {
1877                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1878                              "No skb available, drop packet.\n");
1879                 rx_ring->rx_dropped++;
1880                 return;
1881         }
1882
1883         /* Frame error, so drop the packet. */
1884         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1885                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1886                 dev_kfree_skb_any(skb);
1887                 return;
1888         }
1889
1890         /* The max framesize filter on this chip is set higher than
1891          * MTU since FCoE uses 2k frames.
1892          */
1893         if (skb->len > ndev->mtu + ETH_HLEN) {
1894                 dev_kfree_skb_any(skb);
1895                 rx_ring->rx_dropped++;
1896                 return;
1897         }
1898
1899         /* loopback self test for ethtool */
1900         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1901                 ql_check_lb_frame(qdev, skb);
1902                 dev_kfree_skb_any(skb);
1903                 return;
1904         }
1905
1906         prefetch(skb->data);
1907         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1908                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1909                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1910                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1911                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1912                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1913                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1914                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1915                 rx_ring->rx_multicast++;
1916         }
1917         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1918                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919                              "Promiscuous Packet.\n");
1920         }
1921
1922         skb->protocol = eth_type_trans(skb, ndev);
1923         skb_checksum_none_assert(skb);
1924
1925         /* If rx checksum is on, and there are no
1926          * csum or frame errors.
1927          */
1928         if ((ndev->features & NETIF_F_RXCSUM) &&
1929             !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1930                 /* TCP frame. */
1931                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1932                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1933                                      "TCP checksum done!\n");
1934                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1935                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1936                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1937                         /* Unfragmented ipv4 UDP frame. */
1938                         struct iphdr *iph = (struct iphdr *) skb->data;
1939                         if (!(iph->frag_off &
1940                                 htons(IP_MF|IP_OFFSET))) {
1941                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1942                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1943                                              "UDP checksum done!\n");
1944                         }
1945                 }
1946         }
1947
1948         rx_ring->rx_packets++;
1949         rx_ring->rx_bytes += skb->len;
1950         skb_record_rx_queue(skb, rx_ring->cq_id);
1951         if (vlan_id != 0xffff)
1952                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1953         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1954                 napi_gro_receive(&rx_ring->napi, skb);
1955         else
1956                 netif_receive_skb(skb);
1957 }
1958
1959 /* Process an inbound completion from an rx ring. */
1960 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1961                                             struct rx_ring *rx_ring,
1962                                             struct ib_mac_iocb_rsp *ib_mac_rsp)
1963 {
1964         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1965         u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1966                         (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1967                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1968                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1969
1970         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1971
1972         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1973                 /* The data and headers are split into
1974                  * separate buffers.
1975                  */
1976                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1977                                              vlan_id);
1978         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1979                 /* The data fit in a single small buffer.
1980                  * Allocate a new skb, copy the data and
1981                  * return the buffer to the free pool.
1982                  */
1983                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1984                                       vlan_id);
1985         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1986                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1987                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1988                 /* TCP packet in a page chunk that's been checksummed.
1989                  * Tack it on to our GRO skb and let it go.
1990                  */
1991                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1992                                            vlan_id);
1993         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1994                 /* Non-TCP packet in a page chunk. Allocate an
1995                  * skb, tack it on frags, and send it up.
1996                  */
1997                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1998                                        vlan_id);
1999         } else {
2000                 /* Non-TCP/UDP large frames that span multiple buffers
2001                  * can be processed correctly by the split frame logic.
2002                  */
2003                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2004                                              vlan_id);
2005         }
2006
2007         return (unsigned long)length;
2008 }
2009
2010 /* Process an outbound completion from an rx ring. */
2011 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2012                                    struct ob_mac_iocb_rsp *mac_rsp)
2013 {
2014         struct tx_ring *tx_ring;
2015         struct tx_ring_desc *tx_ring_desc;
2016
2017         QL_DUMP_OB_MAC_RSP(mac_rsp);
2018         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2019         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2020         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2021         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2022         tx_ring->tx_packets++;
2023         dev_kfree_skb(tx_ring_desc->skb);
2024         tx_ring_desc->skb = NULL;
2025
2026         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2027                                         OB_MAC_IOCB_RSP_S |
2028                                         OB_MAC_IOCB_RSP_L |
2029                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2030                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2031                         netif_warn(qdev, tx_done, qdev->ndev,
2032                                    "Total descriptor length did not match transfer length.\n");
2033                 }
2034                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2035                         netif_warn(qdev, tx_done, qdev->ndev,
2036                                    "Frame too short to be valid, not sent.\n");
2037                 }
2038                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2039                         netif_warn(qdev, tx_done, qdev->ndev,
2040                                    "Frame too long, but sent anyway.\n");
2041                 }
2042                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2043                         netif_warn(qdev, tx_done, qdev->ndev,
2044                                    "PCI backplane error. Frame not sent.\n");
2045                 }
2046         }
2047         atomic_inc(&tx_ring->tx_count);
2048 }
2049
2050 /* Fire up a handler to reset the MPI processor. */
2051 void ql_queue_fw_error(struct ql_adapter *qdev)
2052 {
2053         ql_link_off(qdev);
2054         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2055 }
2056
2057 void ql_queue_asic_error(struct ql_adapter *qdev)
2058 {
2059         ql_link_off(qdev);
2060         ql_disable_interrupts(qdev);
2061         /* Clear adapter up bit to signal the recovery
2062          * process that it shouldn't kill the reset worker
2063          * thread
2064          */
2065         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2066         /* Set asic recovery bit to indicate reset process that we are
2067          * in fatal error recovery process rather than normal close
2068          */
2069         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2070         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2071 }
2072
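     /* Handle an asynchronous event completion from the chip.  A fatal MPI
      * error schedules the firmware reset worker; the remaining events are
      * treated as ASIC errors and kick off the recovery worker.
      */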
2073 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2074                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2075 {
2076         switch (ib_ae_rsp->event) {
2077         case MGMT_ERR_EVENT:
2078                 netif_err(qdev, rx_err, qdev->ndev,
2079                           "Management Processor Fatal Error.\n");
2080                 ql_queue_fw_error(qdev);
2081                 return;
2082
2083         case CAM_LOOKUP_ERR_EVENT:
2084                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2085                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2086                 ql_queue_asic_error(qdev);
2087                 return;
2088
2089         case SOFT_ECC_ERROR_EVENT:
2090                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2091                 ql_queue_asic_error(qdev);
2092                 break;
2093
2094         case PCI_ERR_ANON_BUF_RD:
2095                 netdev_err(qdev->ndev,
2096                            "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2097                            ib_ae_rsp->q_id);
2098                 ql_queue_asic_error(qdev);
2099                 break;
2100
2101         default:
2102                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2103                           ib_ae_rsp->event);
2104                 ql_queue_asic_error(qdev);
2105                 break;
2106         }
2107 }
2108
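     /* Reclaim TX completions posted to an outbound completion ring: unmap
      * the DMA buffers, free the skbs, and wake the matching TX subqueue
      * once it is at least a quarter empty again.
      */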
2109 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2110 {
2111         struct ql_adapter *qdev = rx_ring->qdev;
2112         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2113         struct ob_mac_iocb_rsp *net_rsp = NULL;
2114         int count = 0;
2115
2116         struct tx_ring *tx_ring;
2117         /* While there are entries in the completion queue. */
2118         while (prod != rx_ring->cnsmr_idx) {
2119
2120                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2121                              "cq_id = %d, prod = %d, cnsmr = %d\n",
2122                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2123
2124                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2125                 rmb();
2126                 switch (net_rsp->opcode) {
2127
2128                 case OPCODE_OB_MAC_TSO_IOCB:
2129                 case OPCODE_OB_MAC_IOCB:
2130                         ql_process_mac_tx_intr(qdev, net_rsp);
2131                         break;
2132                 default:
2133                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2134                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2135                                      net_rsp->opcode);
2136                 }
2137                 count++;
2138                 ql_update_cq(rx_ring);
2139                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2140         }
2141         if (!net_rsp)
2142                 return 0;
2143         ql_write_cq_idx(rx_ring);
2144         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2145         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2146                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2147                         /*
2148                          * The queue got stopped because the tx_ring was full.
2149                          * Wake it up, because it's now at least 25% empty.
2150                          */
2151                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2152         }
2153
2154         return count;
2155 }
2156
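     /* Service up to @budget inbound completions on an RSS ring, then
      * refill the buffer queues and update the consumer index register.
      */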
2157 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2158 {
2159         struct ql_adapter *qdev = rx_ring->qdev;
2160         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2161         struct ql_net_rsp_iocb *net_rsp;
2162         int count = 0;
2163
2164         /* While there are entries in the completion queue. */
2165         while (prod != rx_ring->cnsmr_idx) {
2166
2167                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2168                              "cq_id = %d, prod = %d, cnsmr = %d\n",
2169                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2170
2171                 net_rsp = rx_ring->curr_entry;
2172                 rmb();
2173                 switch (net_rsp->opcode) {
2174                 case OPCODE_IB_MAC_IOCB:
2175                         ql_process_mac_rx_intr(qdev, rx_ring,
2176                                                (struct ib_mac_iocb_rsp *)
2177                                                net_rsp);
2178                         break;
2179
2180                 case OPCODE_IB_AE_IOCB:
2181                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2182                                                 net_rsp);
2183                         break;
2184                 default:
2185                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2186                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2187                                      net_rsp->opcode);
2188                         break;
2189                 }
2190                 count++;
2191                 ql_update_cq(rx_ring);
2192                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2193                 if (count == budget)
2194                         break;
2195         }
2196         ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2197         ql_write_cq_idx(rx_ring);
2198         return count;
2199 }
2200
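     /* NAPI poll routine for an MSI-X vector.  Drain any TX completion
      * rings whose bit is set in this vector's irq_mask, then the RSS ring
      * itself, and re-enable the interrupt once we finish under budget.
      */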
2201 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2202 {
2203         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2204         struct ql_adapter *qdev = rx_ring->qdev;
2205         struct rx_ring *trx_ring;
2206         int i, work_done = 0;
2207         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2208
2209         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2210                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2211
2212         /* Service the TX rings first.  They start
2213          * right after the RSS rings.
2214          */
2215         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2216                 trx_ring = &qdev->rx_ring[i];
2217                 /* If this TX completion ring belongs to this vector and
2218                  * it's not empty then service it.
2219                  */
2220                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2221                     (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2222                      trx_ring->cnsmr_idx)) {
2223                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2224                                      "%s: Servicing TX completion ring %d.\n",
2225                                      __func__, trx_ring->cq_id);
2226                         ql_clean_outbound_rx_ring(trx_ring);
2227                 }
2228         }
2229
2230         /*
2231          * Now service the RSS ring if it's active.
2232          */
2233         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2234                                         rx_ring->cnsmr_idx) {
2235                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2236                              "%s: Servicing RX completion ring %d.\n",
2237                              __func__, rx_ring->cq_id);
2238                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2239         }
2240
2241         if (work_done < budget) {
2242                 napi_complete_done(napi, work_done);
2243                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2244         }
2245         return work_done;
2246 }
2247
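     /* Update the NIC receive configuration register to reflect whether
      * hardware VLAN CTAG receive handling is enabled.
      */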
2248 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2249 {
2250         struct ql_adapter *qdev = netdev_priv(ndev);
2251
2252         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2253                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2254                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2255         } else {
2256                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2257         }
2258 }
2259
2260 /**
2261  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2262  * based on the features to enable/disable hardware vlan accel
      * @ndev: the net device whose features are changing
      * @features: the new feature set being applied
2263  */
2264 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2265                                         netdev_features_t features)
2266 {
2267         struct ql_adapter *qdev = netdev_priv(ndev);
2268         int status = 0;
2269         bool need_restart = netif_running(ndev);
2270
2271         if (need_restart) {
2272                 status = ql_adapter_down(qdev);
2273                 if (status) {
2274                         netif_err(qdev, link, qdev->ndev,
2275                                   "Failed to bring down the adapter\n");
2276                         return status;
2277                 }
2278         }
2279
2280         /* Update the features with the recent change. */
2281         ndev->features = features;
2282
2283         if (need_restart) {
2284                 status = ql_adapter_up(qdev);
2285                 if (status) {
2286                         netif_err(qdev, link, qdev->ndev,
2287                                   "Failed to bring up the adapter\n");
2288                         return status;
2289                 }
2290         }
2291
2292         return status;
2293 }
2294
2295 static int qlge_set_features(struct net_device *ndev,
2296                              netdev_features_t features)
2297 {
2298         netdev_features_t changed = ndev->features ^ features;
2299         int err;
2300
2301         if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2302                 /* Update the behavior of vlan accel in the adapter */
2303                 err = qlge_update_hw_vlan_features(ndev, features);
2304                 if (err)
2305                         return err;
2306
2307                 qlge_vlan_mode(ndev, features);
2308         }
2309
2310         return 0;
2311 }
2312
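     /* Program a VLAN id into the MAC address lookup registers.  Callers
      * must already hold the SEM_MAC_ADDR hardware semaphore.
      */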
2313 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2314 {
2315         u32 enable_bit = MAC_ADDR_E;
2316         int err;
2317
2318         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2319                                   MAC_ADDR_TYPE_VLAN, vid);
2320         if (err)
2321                 netif_err(qdev, ifup, qdev->ndev,
2322                           "Failed to init vlan address.\n");
2323         return err;
2324 }
2325
2326 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2327 {
2328         struct ql_adapter *qdev = netdev_priv(ndev);
2329         int status;
2330         int err;
2331
2332         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2333         if (status)
2334                 return status;
2335
2336         err = __qlge_vlan_rx_add_vid(qdev, vid);
2337         set_bit(vid, qdev->active_vlans);
2338
2339         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2340
2341         return err;
2342 }
2343
2344 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2345 {
2346         u32 enable_bit = 0;
2347         int err;
2348
2349         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2350                                   MAC_ADDR_TYPE_VLAN, vid);
2351         if (err)
2352                 netif_err(qdev, ifup, qdev->ndev,
2353                           "Failed to clear vlan address.\n");
2354         return err;
2355 }
2356
2357 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2358 {
2359         struct ql_adapter *qdev = netdev_priv(ndev);
2360         int status;
2361         int err;
2362
2363         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2364         if (status)
2365                 return status;
2366
2367         err = __qlge_vlan_rx_kill_vid(qdev, vid);
2368         clear_bit(vid, qdev->active_vlans);
2369
2370         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2371
2372         return err;
2373 }
2374
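     /* Re-program every VLAN id recorded in active_vlans, e.g. after the
      * adapter has been reset and the hardware filters were lost.
      */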
2375 static void qlge_restore_vlan(struct ql_adapter *qdev)
2376 {
2377         int status;
2378         u16 vid;
2379
2380         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2381         if (status)
2382                 return;
2383
2384         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2385                 __qlge_vlan_rx_add_vid(qdev, vid);
2386
2387         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2388 }
2389
2390 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2391 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2392 {
2393         struct rx_ring *rx_ring = dev_id;
2394         napi_schedule(&rx_ring->napi);
2395         return IRQ_HANDLED;
2396 }
2397
2398 /* This handles a fatal error, MPI activity, and the default
2399  * rx_ring in an MSI-X multiple vector environment.
2400  * In an MSI/Legacy environment it also processes the rest of
2401  * the rx_rings.
2402  */
2403 static irqreturn_t qlge_isr(int irq, void *dev_id)
2404 {
2405         struct rx_ring *rx_ring = dev_id;
2406         struct ql_adapter *qdev = rx_ring->qdev;
2407         struct intr_context *intr_context = &qdev->intr_context[0];
2408         u32 var;
2409         int work_done = 0;
2410
2411         /* Experience shows that when using INTx interrupts, interrupts must
2412          * be masked manually.
2413          * When using MSI mode, INTR_EN_EN must be explicitly disabled
2414          * (even though it is auto-masked), otherwise a later command to
2415          * enable it is not effective.
2416          */
2417         if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2418                 ql_disable_completion_interrupt(qdev, 0);
2419
2420         var = ql_read32(qdev, STS);
2421
2422         /*
2423          * Check for fatal error.
2424          */
2425         if (var & STS_FE) {
2426                 ql_disable_completion_interrupt(qdev, 0);
2427                 ql_queue_asic_error(qdev);
2428                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2429                 var = ql_read32(qdev, ERR_STS);
2430                 netdev_err(qdev->ndev,
2431                            "Resetting chip. Error Status Register = 0x%x\n", var);
2432                 return IRQ_HANDLED;
2433         }
2434
2435         /*
2436          * Check MPI processor activity.
2437          */
2438         if ((var & STS_PI) &&
2439             (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2440                 /*
2441                  * We've got an async event or mailbox completion.
2442                  * Handle it and clear the source of the interrupt.
2443                  */
2444                 netif_err(qdev, intr, qdev->ndev,
2445                           "Got MPI processor interrupt.\n");
2446                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2447                 queue_delayed_work_on(smp_processor_id(),
2448                                       qdev->workqueue, &qdev->mpi_work, 0);
2449                 work_done++;
2450         }
2451
2452         /*
2453          * Get the bit-mask that shows the active queues for this
2454          * pass.  Compare it to the queues that this irq services
2455          * and call napi if there's a match.
2456          */
2457         var = ql_read32(qdev, ISR1);
2458         if (var & intr_context->irq_mask) {
2459                 netif_info(qdev, intr, qdev->ndev,
2460                            "Waking handler for rx_ring[0].\n");
2461                 napi_schedule(&rx_ring->napi);
2462                 work_done++;
2463         } else {
2464                 /* Experience shows that the device sometimes signals an
2465                  * interrupt but no work is scheduled from this function.
2466                  * Nevertheless, the interrupt is auto-masked. Therefore, we
2467                  * systematically re-enable the interrupt if we didn't
2468                  * schedule napi.
2469                  */
2470                 ql_enable_completion_interrupt(qdev, 0);
2471         }
2472
2473         return work_done ? IRQ_HANDLED : IRQ_NONE;
2474 }
2475
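     /* Set up a TSO IOCB for a GSO skb: record the header lengths and MSS,
      * and seed the TCP checksum with the pseudo-header checksum so the
      * hardware can complete it for each segment it generates.
      */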
2476 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2477 {
2478
2479         if (skb_is_gso(skb)) {
2480                 int err;
2481                 __be16 l3_proto = vlan_get_protocol(skb);
2482
2483                 err = skb_cow_head(skb, 0);
2484                 if (err < 0)
2485                         return err;
2486
2487                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2488                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2489                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2490                 mac_iocb_ptr->total_hdrs_len =
2491                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2492                 mac_iocb_ptr->net_trans_offset =
2493                     cpu_to_le16(skb_network_offset(skb) |
2494                                 skb_transport_offset(skb)
2495                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2496                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2497                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2498                 if (likely(l3_proto == htons(ETH_P_IP))) {
2499                         struct iphdr *iph = ip_hdr(skb);
2500                         iph->check = 0;
2501                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2502                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2503                                                                  iph->daddr, 0,
2504                                                                  IPPROTO_TCP,
2505                                                                  0);
2506                 } else if (l3_proto == htons(ETH_P_IPV6)) {
2507                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2508                         tcp_hdr(skb)->check =
2509                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2510                                              &ipv6_hdr(skb)->daddr,
2511                                              0, IPPROTO_TCP, 0);
2512                 }
2513                 return 1;
2514         }
2515         return 0;
2516 }
2517
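     /* Set up transmit checksum offload for an IPv4, non-GSO
      * CHECKSUM_PARTIAL frame: point the IOCB at the transport header and
      * pre-seed the TCP or UDP checksum with the pseudo-header checksum
      * for the hardware to finish.
      */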
2518 static void ql_hw_csum_setup(struct sk_buff *skb,
2519                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2520 {
2521         int len;
2522         struct iphdr *iph = ip_hdr(skb);
2523         __sum16 *check;
2524         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2525         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2526         mac_iocb_ptr->net_trans_offset =
2527                 cpu_to_le16(skb_network_offset(skb) |
2528                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2529
2530         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2531         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2532         if (likely(iph->protocol == IPPROTO_TCP)) {
2533                 check = &(tcp_hdr(skb)->check);
2534                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2535                 mac_iocb_ptr->total_hdrs_len =
2536                     cpu_to_le16(skb_transport_offset(skb) +
2537                                 (tcp_hdr(skb)->doff << 2));
2538         } else {
2539                 check = &(udp_hdr(skb)->check);
2540                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2541                 mac_iocb_ptr->total_hdrs_len =
2542                     cpu_to_le16(skb_transport_offset(skb) +
2543                                 sizeof(struct udphdr));
2544         }
2545         *check = ~csum_tcpudp_magic(iph->saddr,
2546                                     iph->daddr, len, iph->protocol, 0);
2547 }
2548
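     /* The ndo_start_xmit handler.  Select the TX ring from the skb's
      * queue mapping, build a MAC IOCB (adding VLAN, TSO or checksum
      * offload information as needed), map the buffers and ring the
      * doorbell.  The subqueue is stopped when fewer than two descriptors
      * remain.
      */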
2549 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2550 {
2551         struct tx_ring_desc *tx_ring_desc;
2552         struct ob_mac_iocb_req *mac_iocb_ptr;
2553         struct ql_adapter *qdev = netdev_priv(ndev);
2554         int tso;
2555         struct tx_ring *tx_ring;
2556         u32 tx_ring_idx = (u32) skb->queue_mapping;
2557
2558         tx_ring = &qdev->tx_ring[tx_ring_idx];
2559
2560         if (skb_padto(skb, ETH_ZLEN))
2561                 return NETDEV_TX_OK;
2562
2563         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2564                 netif_info(qdev, tx_queued, qdev->ndev,
2565                            "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2566                            __func__, tx_ring_idx);
2567                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2568                 tx_ring->tx_errors++;
2569                 return NETDEV_TX_BUSY;
2570         }
2571         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2572         mac_iocb_ptr = tx_ring_desc->queue_entry;
2573         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2574
2575         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2576         mac_iocb_ptr->tid = tx_ring_desc->index;
2577         /* Record the tx queue index in the IOCB so that, when the
2578          * completion arrives, we can find the right tx_ring again.
2579          */
2580         mac_iocb_ptr->txq_idx = tx_ring_idx;
2581         tx_ring_desc->skb = skb;
2582
2583         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2584
2585         if (skb_vlan_tag_present(skb)) {
2586                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2587                              "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2588                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2589                 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2590         }
2591         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2592         if (tso < 0) {
2593                 dev_kfree_skb_any(skb);
2594                 return NETDEV_TX_OK;
2595         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2596                 ql_hw_csum_setup(skb,
2597                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2598         }
2599         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2600                         NETDEV_TX_OK) {
2601                 netif_err(qdev, tx_queued, qdev->ndev,
2602                           "Could not map the segments.\n");
2603                 tx_ring->tx_errors++;
2604                 return NETDEV_TX_BUSY;
2605         }
2606         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2607         tx_ring->prod_idx++;
2608         if (tx_ring->prod_idx == tx_ring->wq_len)
2609                 tx_ring->prod_idx = 0;
2610         wmb();
2611
2612         ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2613         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2614                      "tx queued, slot %d, len %d\n",
2615                      tx_ring->prod_idx, skb->len);
2616
2617         atomic_dec(&tx_ring->tx_count);
2618
2619         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2620                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2621                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2622                         /*
2623                          * The queue got stopped because the tx_ring was full.
2624                          * Wake it up, because it's now at least 25% empty.
2625                          */
2626                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2627         }
2628         return NETDEV_TX_OK;
2629 }
2630
2631 static void ql_free_shadow_space(struct ql_adapter *qdev)
2632 {
2633         if (qdev->rx_ring_shadow_reg_area) {
2634                 pci_free_consistent(qdev->pdev,
2635                                     PAGE_SIZE,
2636                                     qdev->rx_ring_shadow_reg_area,
2637                                     qdev->rx_ring_shadow_reg_dma);
2638                 qdev->rx_ring_shadow_reg_area = NULL;
2639         }
2640         if (qdev->tx_ring_shadow_reg_area) {
2641                 pci_free_consistent(qdev->pdev,
2642                                     PAGE_SIZE,
2643                                     qdev->tx_ring_shadow_reg_area,
2644                                     qdev->tx_ring_shadow_reg_dma);
2645                 qdev->tx_ring_shadow_reg_area = NULL;
2646         }
2647 }
2648
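/* The shadow areas are one DMA-coherent page per direction.
 * ql_start_rx_ring() carves per-ring completion-queue producer-index
 * shadows and the lbq/sbq indirection lists out of the RX page, and
 * ql_start_tx_ring() carves per-ring consumer-index shadows out of the
 * TX page; the chip is handed the DMA addresses so it can update those
 * indices directly in host memory.
 */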
2649 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2650 {
2651         qdev->rx_ring_shadow_reg_area =
2652                 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2653                                       &qdev->rx_ring_shadow_reg_dma);
2654         if (!qdev->rx_ring_shadow_reg_area) {
2655                 netif_err(qdev, ifup, qdev->ndev,
2656                           "Allocation of RX shadow space failed.\n");
2657                 return -ENOMEM;
2658         }
2659
2660         qdev->tx_ring_shadow_reg_area =
2661                 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2662                                       &qdev->tx_ring_shadow_reg_dma);
2663         if (!qdev->tx_ring_shadow_reg_area) {
2664                 netif_err(qdev, ifup, qdev->ndev,
2665                           "Allocation of TX shadow space failed.\n");
2666                 goto err_wqp_sh_area;
2667         }
2668         return 0;
2669
2670 err_wqp_sh_area:
2671         pci_free_consistent(qdev->pdev,
2672                             PAGE_SIZE,
2673                             qdev->rx_ring_shadow_reg_area,
2674                             qdev->rx_ring_shadow_reg_dma);
2675         return -ENOMEM;
2676 }
2677
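/* Point each software tx_ring_desc at its IOCB slot in the DMA work
 * queue and mark the whole ring as available.
 */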
2678 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2679 {
2680         struct tx_ring_desc *tx_ring_desc;
2681         int i;
2682         struct ob_mac_iocb_req *mac_iocb_ptr;
2683
2684         mac_iocb_ptr = tx_ring->wq_base;
2685         tx_ring_desc = tx_ring->q;
2686         for (i = 0; i < tx_ring->wq_len; i++) {
2687                 tx_ring_desc->index = i;
2688                 tx_ring_desc->skb = NULL;
2689                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2690                 mac_iocb_ptr++;
2691                 tx_ring_desc++;
2692         }
2693         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2694 }
2695
2696 static void ql_free_tx_resources(struct ql_adapter *qdev,
2697                                  struct tx_ring *tx_ring)
2698 {
2699         if (tx_ring->wq_base) {
2700                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2701                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2702                 tx_ring->wq_base = NULL;
2703         }
2704         kfree(tx_ring->q);
2705         tx_ring->q = NULL;
2706 }
2707
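/* Allocate the DMA work queue and the driver-side descriptor array for
 * one TX ring.  A work queue base that fails the WQ_ADDR_ALIGN check is
 * treated like an allocation failure.
 */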
2708 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2709                                  struct tx_ring *tx_ring)
2710 {
2711         tx_ring->wq_base =
2712             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2713                                  &tx_ring->wq_base_dma);
2714
2715         if (!tx_ring->wq_base ||
2716             tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2717                 goto pci_alloc_err;
2718
2719         tx_ring->q =
2720             kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2721                           GFP_KERNEL);
2722         if (!tx_ring->q)
2723                 goto err;
2724
2725         return 0;
2726 err:
2727         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2728                             tx_ring->wq_base, tx_ring->wq_base_dma);
2729         tx_ring->wq_base = NULL;
2730 pci_alloc_err:
2731         netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2732         return -ENOMEM;
2733 }
2734
2735 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2736 {
2737         struct qlge_bq *lbq = &rx_ring->lbq;
2738         unsigned int last_offset;
2739
2740         last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
2741         while (lbq->next_to_clean != lbq->next_to_use) {
2742                 struct qlge_bq_desc *lbq_desc =
2743                         &lbq->queue[lbq->next_to_clean];
2744
2745                 if (lbq_desc->p.pg_chunk.offset == last_offset)
2746                         pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
2747                                        ql_lbq_block_size(qdev),
2748                                        PCI_DMA_FROMDEVICE);
2749                 put_page(lbq_desc->p.pg_chunk.page);
2750
2751                 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2752         }
2753
2754         if (rx_ring->master_chunk.page) {
2755                 pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
2756                                ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2757                 put_page(rx_ring->master_chunk.page);
2758                 rx_ring->master_chunk.page = NULL;
2759         }
2760 }
2761
2762 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2763 {
2764         int i;
2765
2766         for (i = 0; i < QLGE_BQ_LEN; i++) {
2767                 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2768
2769                 if (!sbq_desc) {
2770                         netif_err(qdev, ifup, qdev->ndev,
2771                                   "sbq_desc %d is NULL.\n", i);
2772                         return;
2773                 }
2774                 if (sbq_desc->p.skb) {
2775                         pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
2776                                          SMALL_BUF_MAP_SIZE,
2777                                          PCI_DMA_FROMDEVICE);
2778                         dev_kfree_skb(sbq_desc->p.skb);
2779                         sbq_desc->p.skb = NULL;
2780                 }
2781         }
2782 }
2783
2784 /* Free all large and small rx buffers associated
2785  * with the completion queues for this device.
2786  */
2787 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2788 {
2789         int i;
2790
2791         for (i = 0; i < qdev->rx_ring_count; i++) {
2792                 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2793
2794                 if (rx_ring->lbq.queue)
2795                         ql_free_lbq_buffers(qdev, rx_ring);
2796                 if (rx_ring->sbq.queue)
2797                         ql_free_sbq_buffers(qdev, rx_ring);
2798         }
2799 }
2800
2801 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2802 {
2803         int i;
2804
2805         for (i = 0; i < qdev->rss_ring_count; i++)
2806                 ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2807                                         HZ / 2);
2808 }
2809
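/* Set up one buffer queue (small or large): allocate the DMA-visible
 * array of 64-bit buffer addresses plus the driver-side qlge_bq_desc
 * bookkeeping, and point each descriptor at its slot in that array.
 */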
2810 static int qlge_init_bq(struct qlge_bq *bq)
2811 {
2812         struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2813         struct ql_adapter *qdev = rx_ring->qdev;
2814         struct qlge_bq_desc *bq_desc;
2815         __le64 *buf_ptr;
2816         int i;
2817
2818         bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
2819                                         &bq->base_dma);
2820         if (!bq->base) {
2821                 netif_err(qdev, ifup, qdev->ndev,
2822                           "ring %u %s allocation failed.\n", rx_ring->cq_id,
2823                           bq_type_name[bq->type]);
2824                 return -ENOMEM;
2825         }
2826
2827         bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2828                                   GFP_KERNEL);
2829         if (!bq->queue)
2830                 return -ENOMEM;
2831
2832         buf_ptr = bq->base;
2833         bq_desc = &bq->queue[0];
2834         for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2835                 bq_desc->p.skb = NULL;
2836                 bq_desc->index = i;
2837                 bq_desc->buf_ptr = buf_ptr;
2838         }
2839
2840         return 0;
2841 }
2842
2843 static void ql_free_rx_resources(struct ql_adapter *qdev,
2844                                  struct rx_ring *rx_ring)
2845 {
2846         /* Free the small buffer queue. */
2847         if (rx_ring->sbq.base) {
2848                 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2849                                     rx_ring->sbq.base, rx_ring->sbq.base_dma);
2850                 rx_ring->sbq.base = NULL;
2851         }
2852
2853         /* Free the small buffer queue control blocks. */
2854         kfree(rx_ring->sbq.queue);
2855         rx_ring->sbq.queue = NULL;
2856
2857         /* Free the large buffer queue. */
2858         if (rx_ring->lbq.base) {
2859                 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2860                                     rx_ring->lbq.base, rx_ring->lbq.base_dma);
2861                 rx_ring->lbq.base = NULL;
2862         }
2863
2864         /* Free the large buffer queue control blocks. */
2865         kfree(rx_ring->lbq.queue);
2866         rx_ring->lbq.queue = NULL;
2867
2868         /* Free the rx queue. */
2869         if (rx_ring->cq_base) {
2870                 pci_free_consistent(qdev->pdev,
2871                                     rx_ring->cq_size,
2872                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2873                 rx_ring->cq_base = NULL;
2874         }
2875 }
2876
2877 /* Allocate queues and buffers for this completions queue based
2878  * on the values in the parameter structure.
2879  */
2880 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2881                                  struct rx_ring *rx_ring)
2882 {
2883
2884         /*
2885          * Allocate the completion queue for this rx_ring.
2886          */
2887         rx_ring->cq_base =
2888             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2889                                  &rx_ring->cq_base_dma);
2890
2891         if (!rx_ring->cq_base) {
2892                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2893                 return -ENOMEM;
2894         }
2895
2896         if (rx_ring->cq_id < qdev->rss_ring_count &&
2897             (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2898                 ql_free_rx_resources(qdev, rx_ring);
2899                 return -ENOMEM;
2900         }
2901
2902         return 0;
2903 }
2904
2905 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2906 {
2907         struct tx_ring *tx_ring;
2908         struct tx_ring_desc *tx_ring_desc;
2909         int i, j;
2910
2911         /*
2912          * Loop through all queues and free
2913          * any resources.
2914          */
2915         for (j = 0; j < qdev->tx_ring_count; j++) {
2916                 tx_ring = &qdev->tx_ring[j];
2917                 for (i = 0; i < tx_ring->wq_len; i++) {
2918                         tx_ring_desc = &tx_ring->q[i];
2919                         if (tx_ring_desc && tx_ring_desc->skb) {
2920                                 netif_err(qdev, ifdown, qdev->ndev,
2921                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2922                                           tx_ring_desc->skb, j,
2923                                           tx_ring_desc->index);
2924                                 ql_unmap_send(qdev, tx_ring_desc,
2925                                               tx_ring_desc->map_cnt);
2926                                 dev_kfree_skb(tx_ring_desc->skb);
2927                                 tx_ring_desc->skb = NULL;
2928                         }
2929                 }
2930         }
2931 }
2932
2933 static void ql_free_mem_resources(struct ql_adapter *qdev)
2934 {
2935         int i;
2936
2937         for (i = 0; i < qdev->tx_ring_count; i++)
2938                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2939         for (i = 0; i < qdev->rx_ring_count; i++)
2940                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2941         ql_free_shadow_space(qdev);
2942 }
2943
2944 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2945 {
2946         int i;
2947
2948         /* Allocate space for our shadow registers and such. */
2949         if (ql_alloc_shadow_space(qdev))
2950                 return -ENOMEM;
2951
2952         for (i = 0; i < qdev->rx_ring_count; i++) {
2953                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2954                         netif_err(qdev, ifup, qdev->ndev,
2955                                   "RX resource allocation failed.\n");
2956                         goto err_mem;
2957                 }
2958         }
2959         /* Allocate tx queue resources */
2960         for (i = 0; i < qdev->tx_ring_count; i++) {
2961                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2962                         netif_err(qdev, ifup, qdev->ndev,
2963                                   "TX resource allocation failed.\n");
2964                         goto err_mem;
2965                 }
2966         }
2967         return 0;
2968
2969 err_mem:
2970         ql_free_mem_resources(qdev);
2971         return -ENOMEM;
2972 }
2973
2974 /* Set up the rx ring control block and pass it to the chip.
2975  * The control block is defined as
2976  * "Completion Queue Initialization Control Block", or cqicb.
2977  */
2978 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2979 {
2980         struct cqicb *cqicb = &rx_ring->cqicb;
2981         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2982                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2983         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2984                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2985         void __iomem *doorbell_area =
2986             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2987         int err = 0;
2988         u64 tmp;
2989         __le64 *base_indirect_ptr;
2990         int page_entries;
2991
2992         /* Set up the shadow registers for this ring. */
2993         rx_ring->prod_idx_sh_reg = shadow_reg;
2994         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2995         *rx_ring->prod_idx_sh_reg = 0;
2996         shadow_reg += sizeof(u64);
2997         shadow_reg_dma += sizeof(u64);
2998         rx_ring->lbq.base_indirect = shadow_reg;
2999         rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
3000         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3001         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3002         rx_ring->sbq.base_indirect = shadow_reg;
3003         rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
3004
3005         /* PCI doorbell mem area + 0x00 for consumer index register */
3006         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3007         rx_ring->cnsmr_idx = 0;
3008         rx_ring->curr_entry = rx_ring->cq_base;
3009
3010         /* PCI doorbell mem area + 0x04 for valid register */
3011         rx_ring->valid_db_reg = doorbell_area + 0x04;
3012
3013         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3014         rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
3015
3016         /* PCI doorbell mem area + 0x1c */
3017         rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
3018
3019         memset((void *)cqicb, 0, sizeof(struct cqicb));
3020         cqicb->msix_vect = rx_ring->irq;
3021
3022         cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3023                                  LEN_CPP_CONT);
3024
3025         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3026
3027         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3028
3029         /*
3030          * Set up the control block load flags.
3031          */
3032         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3033             FLAGS_LV |          /* Load MSI-X vector */
3034             FLAGS_LI;           /* Load irq delay values */
3035         if (rx_ring->cq_id < qdev->rss_ring_count) {
3036                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3037                 tmp = (u64)rx_ring->lbq.base_dma;
3038                 base_indirect_ptr = rx_ring->lbq.base_indirect;
3039                 page_entries = 0;
3040                 do {
3041                         *base_indirect_ptr = cpu_to_le64(tmp);
3042                         tmp += DB_PAGE_SIZE;
3043                         base_indirect_ptr++;
3044                         page_entries++;
3045                 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3046                 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3047                 cqicb->lbq_buf_size =
3048                         cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3049                 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3050                 rx_ring->lbq.next_to_use = 0;
3051                 rx_ring->lbq.next_to_clean = 0;
3052
3053                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3054                 tmp = (u64)rx_ring->sbq.base_dma;
3055                 base_indirect_ptr = rx_ring->sbq.base_indirect;
3056                 page_entries = 0;
3057                 do {
3058                         *base_indirect_ptr = cpu_to_le64(tmp);
3059                         tmp += DB_PAGE_SIZE;
3060                         base_indirect_ptr++;
3061                         page_entries++;
3062                 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3063                 cqicb->sbq_addr =
3064                     cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3065                 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3066                 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3067                 rx_ring->sbq.next_to_use = 0;
3068                 rx_ring->sbq.next_to_clean = 0;
3069         }
3070         if (rx_ring->cq_id < qdev->rss_ring_count) {
3071                 /* Inbound completion handling rx_rings run in
3072                  * separate NAPI contexts.
3073                  */
3074                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3075                                64);
3076                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3077                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3078         } else {
3079                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3080                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3081         }
3082         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3083                            CFG_LCQ, rx_ring->cq_id);
3084         if (err) {
3085                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3086                 return err;
3087         }
3088         return err;
3089 }
3090
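/* Set up the work queue initialization control block (wqicb) for this
 * TX ring, assign its doorbell and shadow registers, and download the
 * block to the chip with ql_write_cfg().
 */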
3091 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3092 {
3093         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3094         void __iomem *doorbell_area =
3095             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3096         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3097             (tx_ring->wq_id * sizeof(u64));
3098         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3099             (tx_ring->wq_id * sizeof(u64));
3100         int err = 0;
3101
3102         /*
3103          * Assign doorbell registers for this tx_ring.
3104          */
3105         /* TX PCI doorbell mem area for tx producer index */
3106         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3107         tx_ring->prod_idx = 0;
3108         /* TX PCI doorbell mem area + 0x04 */
3109         tx_ring->valid_db_reg = doorbell_area + 0x04;
3110
3111         /*
3112          * Assign shadow registers for this tx_ring.
3113          */
3114         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3115         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3116
3117         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3118         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3119                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3120         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3121         wqicb->rid = 0;
3122         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3123
3124         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3125
3126         ql_init_tx_ring(qdev, tx_ring);
3127
3128         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3129                            (u16) tx_ring->wq_id);
3130         if (err) {
3131                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3132                 return err;
3133         }
3134         return err;
3135 }
3136
3137 static void ql_disable_msix(struct ql_adapter *qdev)
3138 {
3139         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3140                 pci_disable_msix(qdev->pdev);
3141                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3142                 kfree(qdev->msi_x_entry);
3143                 qdev->msi_x_entry = NULL;
3144         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3145                 pci_disable_msi(qdev->pdev);
3146                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3147         }
3148 }
3149
3150 /* We start by trying to get the number of vectors
3151  * stored in qdev->intr_count. If we don't get that
3152  * many then we reduce the count and try again.
3153  */
3154 static void ql_enable_msix(struct ql_adapter *qdev)
3155 {
3156         int i, err;
3157
3158         /* Get the MSIX vectors. */
3159         if (qlge_irq_type == MSIX_IRQ) {
3160                 /* Try to alloc space for the msix struct,
3161                  * if it fails then go to MSI/legacy.
3162                  */
3163                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3164                                             sizeof(struct msix_entry),
3165                                             GFP_KERNEL);
3166                 if (!qdev->msi_x_entry) {
3167                         qlge_irq_type = MSI_IRQ;
3168                         goto msi;
3169                 }
3170
3171                 for (i = 0; i < qdev->intr_count; i++)
3172                         qdev->msi_x_entry[i].entry = i;
3173
3174                 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3175                                             1, qdev->intr_count);
3176                 if (err < 0) {
3177                         kfree(qdev->msi_x_entry);
3178                         qdev->msi_x_entry = NULL;
3179                         netif_warn(qdev, ifup, qdev->ndev,
3180                                    "MSI-X Enable failed, trying MSI.\n");
3181                         qlge_irq_type = MSI_IRQ;
3182                 } else {
3183                         qdev->intr_count = err;
3184                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3185                         netif_info(qdev, ifup, qdev->ndev,
3186                                    "MSI-X Enabled, got %d vectors.\n",
3187                                    qdev->intr_count);
3188                         return;
3189                 }
3190         }
3191 msi:
3192         qdev->intr_count = 1;
3193         if (qlge_irq_type == MSI_IRQ) {
3194                 if (!pci_enable_msi(qdev->pdev)) {
3195                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3196                         netif_info(qdev, ifup, qdev->ndev,
3197                                    "Running with MSI interrupts.\n");
3198                         return;
3199                 }
3200         }
3201         qlge_irq_type = LEG_IRQ;
3202         set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3203         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3204                      "Running with legacy interrupts.\n");
3205 }
3206
3207 /* Each vector services 1 RSS ring and 1 or more
3208  * TX completion rings.  This function loops through
3209  * the TX completion rings and assigns the vector that
3210  * will service it.  An example would be if there are
3211  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3212  * This would mean that vector 0 would service RSS ring 0
3213  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3214  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3215  */
3216 static void ql_set_tx_vect(struct ql_adapter *qdev)
3217 {
3218         int i, j, vect;
3219         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3220
3221         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3222                 /* Assign irq vectors to the TX completion rx_rings. */
3223                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3224                                          i < qdev->rx_ring_count; i++) {
3225                         if (j == tx_rings_per_vector) {
3226                                 vect++;
3227                                 j = 0;
3228                         }
3229                         qdev->rx_ring[i].irq = vect;
3230                         j++;
3231                 }
3232         } else {
3233                 /* For single vector all rings have an irq
3234                  * of zero.
3235                  */
3236                 for (i = 0; i < qdev->rx_ring_count; i++)
3237                         qdev->rx_ring[i].irq = 0;
3238         }
3239 }
3240
3241 /* Set the interrupt mask for this vector.  Each vector
3242  * will service 1 RSS ring and 1 or more TX completion
3243  * rings.  This function sets up a bit mask per vector
3244  * that indicates which rings it services.
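 *
 * For example, with 2 MSI-X vectors and 8 TX completion rings
 * (tx_rings_per_vector = 4), vector 0's mask covers cq 0 plus
 * cqs 2-5 and vector 1's mask covers cq 1 plus cqs 6-9.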
3245  */
3246 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3247 {
3248         int j, vect = ctx->intr;
3249         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3250
3251         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3252                 /* Add the RSS ring serviced by this vector
3253                  * to the mask.
3254                  */
3255                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3256                 /* Add the TX ring(s) serviced by this vector
3257                  * to the mask. */
3258                 for (j = 0; j < tx_rings_per_vector; j++) {
3259                         ctx->irq_mask |=
3260                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3261                         (vect * tx_rings_per_vector) + j].cq_id);
3262                 }
3263         } else {
3264                 /* For single vector we just shift each queue's
3265                  * ID into the mask.
3266                  */
3267                 for (j = 0; j < qdev->rx_ring_count; j++)
3268                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3269         }
3270 }
3271
3272 /*
3273  * Here we build the intr_context structures based on
3274  * our rx_ring count and intr vector count.
3275  * The intr_context structure is used to hook each vector
3276  * to possibly different handlers.
3277  */
3278 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3279 {
3280         int i = 0;
3281         struct intr_context *intr_context = &qdev->intr_context[0];
3282
3283         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3284                 /* Each rx_ring has its
3285                  * own intr_context since we have separate
3286                  * vectors for each queue.
3287                  */
3288                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3289                         qdev->rx_ring[i].irq = i;
3290                         intr_context->intr = i;
3291                         intr_context->qdev = qdev;
3292                         /* Set up this vector's bit-mask that indicates
3293                          * which queues it services.
3294                          */
3295                         ql_set_irq_mask(qdev, intr_context);
3296                         /*
3297                          * We set up each vector's enable/disable/read bits so
3298                          * there's no bit/mask calculations in the critical path.
3299                          */
3300                         intr_context->intr_en_mask =
3301                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3302                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3303                             | i;
3304                         intr_context->intr_dis_mask =
3305                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3306                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3307                             INTR_EN_IHD | i;
3308                         intr_context->intr_read_mask =
3309                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3310                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3311                             i;
3312                         if (i == 0) {
3313                                 /* The first vector/queue handles
3314                                  * broadcast/multicast, fatal errors,
3315                                  * and firmware events.  This in addition
3316                                  * to normal inbound NAPI processing.
3317                                  */
3318                                 intr_context->handler = qlge_isr;
3319                                 sprintf(intr_context->name, "%s-rx-%d",
3320                                         qdev->ndev->name, i);
3321                         } else {
3322                                 /*
3323                                  * Inbound queues handle unicast frames only.
3324                                  */
3325                                 intr_context->handler = qlge_msix_rx_isr;
3326                                 sprintf(intr_context->name, "%s-rx-%d",
3327                                         qdev->ndev->name, i);
3328                         }
3329                 }
3330         } else {
3331                 /*
3332                  * All rx_rings use the same intr_context since
3333                  * there is only one vector.
3334                  */
3335                 intr_context->intr = 0;
3336                 intr_context->qdev = qdev;
3337                 /*
3338                  * We set up each vector's enable/disable/read bits so
3339                  * there's no bit/mask calculations in the critical path.
3340                  */
3341                 intr_context->intr_en_mask =
3342                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3343                 intr_context->intr_dis_mask =
3344                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3345                     INTR_EN_TYPE_DISABLE;
3346                 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3347                         /* Experience shows that when using INTx interrupts,
3348                          * the device does not always auto-mask INTR_EN_EN.
3349                          * Moreover, masking INTR_EN_EN manually does not
3350                          * immediately prevent interrupt generation.
3351                          */
3352                         intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3353                                 INTR_EN_EI;
3354                         intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3355                 }
3356                 intr_context->intr_read_mask =
3357                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3358                 /*
3359                  * Single interrupt means one handler for all rings.
3360                  */
3361                 intr_context->handler = qlge_isr;
3362                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3363                 /* Set up this vector's bit-mask that indicates
3364                  * which queues it services. In this case there is
3365                  * a single vector so it will service all RSS and
3366                  * TX completion rings.
3367                  */
3368                 ql_set_irq_mask(qdev, intr_context);
3369         }
3370         /* Tell the TX completion rings which MSIx vector
3371          * they will be using.
3372          */
3373         ql_set_tx_vect(qdev);
3374 }
3375
3376 static void ql_free_irq(struct ql_adapter *qdev)
3377 {
3378         int i;
3379         struct intr_context *intr_context = &qdev->intr_context[0];
3380
3381         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3382                 if (intr_context->hooked) {
3383                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3384                                 free_irq(qdev->msi_x_entry[i].vector,
3385                                          &qdev->rx_ring[i]);
3386                         } else {
3387                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3388                         }
3389                 }
3390         }
3391         ql_disable_msix(qdev);
3392 }
3393
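/* Hook an interrupt handler for every vector resolved above.  With
 * MSI-X each vector gets its own rx_ring as the dev_id; with MSI or
 * legacy interrupts a single handler is shared and keyed on rx_ring[0].
 */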
3394 static int ql_request_irq(struct ql_adapter *qdev)
3395 {
3396         int i;
3397         int status = 0;
3398         struct pci_dev *pdev = qdev->pdev;
3399         struct intr_context *intr_context = &qdev->intr_context[0];
3400
3401         ql_resolve_queues_to_irqs(qdev);
3402
3403         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3404                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3405                         status = request_irq(qdev->msi_x_entry[i].vector,
3406                                              intr_context->handler,
3407                                              0,
3408                                              intr_context->name,
3409                                              &qdev->rx_ring[i]);
3410                         if (status) {
3411                                 netif_err(qdev, ifup, qdev->ndev,
3412                                           "Failed request for MSIX interrupt %d.\n",
3413                                           i);
3414                                 goto err_irq;
3415                         }
3416                 } else {
3417                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3418                                      "trying msi or legacy interrupts.\n");
3419                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3420                                      "%s: irq = %d.\n", __func__, pdev->irq);
3421                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3422                                      "%s: context->name = %s.\n", __func__,
3423                                      intr_context->name);
3424                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3425                                      "%s: dev_id = 0x%p.\n", __func__,
3426                                      &qdev->rx_ring[0]);
3427                         status =
3428                             request_irq(pdev->irq, qlge_isr,
3429                                         test_bit(QL_MSI_ENABLED,
3430                                                  &qdev->
3431                                                  flags) ? 0 : IRQF_SHARED,
3432                                         intr_context->name, &qdev->rx_ring[0]);
3433                         if (status)
3434                                 goto err_irq;
3435
3436                         netif_err(qdev, ifup, qdev->ndev,
3437                                   "Hooked intr 0, queue type RX_Q, with name %s.\n",
3438                                   intr_context->name);
3439                 }
3440                 intr_context->hooked = 1;
3441         }
3442         return status;
3443 err_irq:
3444         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3445         ql_free_irq(qdev);
3446         return status;
3447 }
3448
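/* Build the RSS initialization control block (ricb): set the hash mode
 * flags, fill the 1024-entry indirection table (modulo the RSS ring
 * count) and the IPv4/IPv6 hash keys, then download the block with
 * ql_write_cfg().
 */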
3449 static int ql_start_rss(struct ql_adapter *qdev)
3450 {
3451         static const u8 init_hash_seed[] = {
3452                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3453                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3454                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3455                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3456                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3457         };
3458         struct ricb *ricb = &qdev->ricb;
3459         int status = 0;
3460         int i;
3461         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3462
3463         memset((void *)ricb, 0, sizeof(*ricb));
3464
3465         ricb->base_cq = RSS_L4K;
3466         ricb->flags =
3467                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3468         ricb->mask = cpu_to_le16((u16)(0x3ff));
3469
3470         /*
3471          * Fill out the Indirection Table.
3472          */
3473         for (i = 0; i < 1024; i++)
3474                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3475
3476         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3477         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3478
3479         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3480         if (status) {
3481                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3482                 return status;
3483         }
3484         return status;
3485 }
3486
3487 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3488 {
3489         int i, status = 0;
3490
3491         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3492         if (status)
3493                 return status;
3494         /* Clear all the entries in the routing table. */
3495         for (i = 0; i < 16; i++) {
3496                 status = ql_set_routing_reg(qdev, i, 0, 0);
3497                 if (status) {
3498                         netif_err(qdev, ifup, qdev->ndev,
3499                                   "Failed to init routing register for CAM packets.\n");
3500                         break;
3501                 }
3502         }
3503         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3504         return status;
3505 }
3506
3507 /* Initialize the frame-to-queue routing. */
3508 static int ql_route_initialize(struct ql_adapter *qdev)
3509 {
3510         int status = 0;
3511
3512         /* Clear all the entries in the routing table. */
3513         status = ql_clear_routing_entries(qdev);
3514         if (status)
3515                 return status;
3516
3517         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3518         if (status)
3519                 return status;
3520
3521         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3522                                     RT_IDX_IP_CSUM_ERR, 1);
3523         if (status) {
3524                 netif_err(qdev, ifup, qdev->ndev,
3525                           "Failed to init routing register for IP CSUM error packets.\n");
3526                 goto exit;
3527         }
3528         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3529                                     RT_IDX_TU_CSUM_ERR, 1);
3530         if (status) {
3531                 netif_err(qdev, ifup, qdev->ndev,
3532                           "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3533                 goto exit;
3534         }
3535         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3536         if (status) {
3537                 netif_err(qdev, ifup, qdev->ndev,
3538                           "Failed to init routing register for broadcast packets.\n");
3539                 goto exit;
3540         }
3541         /* If we have more than one inbound queue, then turn on RSS in the
3542          * routing block.
3543          */
3544         if (qdev->rss_ring_count > 1) {
3545                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3546                                             RT_IDX_RSS_MATCH, 1);
3547                 if (status) {
3548                         netif_err(qdev, ifup, qdev->ndev,
3549                                   "Failed to init routing register for MATCH RSS packets.\n");
3550                         goto exit;
3551                 }
3552         }
3553
3554         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3555                                     RT_IDX_CAM_HIT, 1);
3556         if (status)
3557                 netif_err(qdev, ifup, qdev->ndev,
3558                           "Failed to init routing register for CAM packets.\n");
3559 exit:
3560         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3561         return status;
3562 }
3563
3564 int ql_cam_route_initialize(struct ql_adapter *qdev)
3565 {
3566         int status, set;
3567
3568         /* Check if the link is up and use that to
3569          * determine whether we are setting or clearing
3570          * the MAC address in the CAM.
3571          */
3572         set = ql_read32(qdev, STS);
3573         set &= qdev->port_link_up;
3574         status = ql_set_mac_addr(qdev, set);
3575         if (status) {
3576                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3577                 return status;
3578         }
3579
3580         status = ql_route_initialize(qdev);
3581         if (status)
3582                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3583
3584         return status;
3585 }
3586
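/* Program the chip for normal operation: system/error behavior, default
 * queue and VLAN handling, function control and header splitting, then
 * bring up every RX and TX ring, load RSS (if there is more than one
 * inbound queue), initialize the port and the CAM/routing tables, and
 * finally enable NAPI on the RSS rings.
 */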
3587 static int ql_adapter_initialize(struct ql_adapter *qdev)
3588 {
3589         u32 value, mask;
3590         int i;
3591         int status = 0;
3592
3593         /*
3594          * Set up the System register to halt on errors.
3595          */
3596         value = SYS_EFE | SYS_FAE;
3597         mask = value << 16;
3598         ql_write32(qdev, SYS, mask | value);
3599
3600         /* Set the default queue, and VLAN behavior. */
3601         value = NIC_RCV_CFG_DFQ;
3602         mask = NIC_RCV_CFG_DFQ_MASK;
3603         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3604                 value |= NIC_RCV_CFG_RV;
3605                 mask |= (NIC_RCV_CFG_RV << 16);
3606         }
3607         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3608
3609         /* Set the MPI interrupt to enabled. */
3610         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3611
3612         /* Enable the function, set pagesize, enable error checking. */
3613         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3614             FSC_EC | FSC_VM_PAGE_4K;
3615         value |= SPLT_SETTING;
3616
3617         /* Set/clear header splitting. */
3618         mask = FSC_VM_PAGESIZE_MASK |
3619             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3620         ql_write32(qdev, FSC, mask | value);
3621
3622         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3623
3624         /* Set RX packet routing to use port/pci function on which the
3625          * packet arrived, in addition to the usual frame routing.
3626          * This is helpful on bonding where both interfaces can have
3627          * the same MAC address.
3628          */
3629         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3630         /* Reroute all packets to our Interface.
3631          * They may have been routed to MPI firmware
3632          * due to WOL.
3633          */
3634         value = ql_read32(qdev, MGMT_RCV_CFG);
3635         value &= ~MGMT_RCV_CFG_RM;
3636         mask = 0xffff0000;
3637
3638         /* Sticky reg needs clearing due to WOL. */
3639         ql_write32(qdev, MGMT_RCV_CFG, mask);
3640         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3641
3642         /* Default WOL is enabled on Mezz cards */
3643         if (qdev->pdev->subsystem_device == 0x0068 ||
3644             qdev->pdev->subsystem_device == 0x0180)
3645                 qdev->wol = WAKE_MAGIC;
3646
3647         /* Start up the rx queues. */
3648         for (i = 0; i < qdev->rx_ring_count; i++) {
3649                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3650                 if (status) {
3651                         netif_err(qdev, ifup, qdev->ndev,
3652                                   "Failed to start rx ring[%d].\n", i);
3653                         return status;
3654                 }
3655         }
3656
3657         /* If there is more than one inbound completion queue
3658          * then download a RICB to configure RSS.
3659          */
3660         if (qdev->rss_ring_count > 1) {
3661                 status = ql_start_rss(qdev);
3662                 if (status) {
3663                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3664                         return status;
3665                 }
3666         }
3667
3668         /* Start up the tx queues. */
3669         for (i = 0; i < qdev->tx_ring_count; i++) {
3670                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3671                 if (status) {
3672                         netif_err(qdev, ifup, qdev->ndev,
3673                                   "Failed to start tx ring[%d].\n", i);
3674                         return status;
3675                 }
3676         }
3677
3678         /* Initialize the port and set the max framesize. */
3679         status = qdev->nic_ops->port_initialize(qdev);
3680         if (status)
3681                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3682
3683         /* Set up the MAC address and frame routing filter. */
3684         status = ql_cam_route_initialize(qdev);
3685         if (status) {
3686                 netif_err(qdev, ifup, qdev->ndev,
3687                           "Failed to init CAM/Routing tables.\n");
3688                 return status;
3689         }
3690
3691         /* Start NAPI for the RSS queues. */
3692         for (i = 0; i < qdev->rss_ring_count; i++)
3693                 napi_enable(&qdev->rx_ring[i].napi);
3694
3695         return status;
3696 }
3697
3698 /* Issue soft reset to chip. */
3699 static int ql_adapter_reset(struct ql_adapter *qdev)
3700 {
3701         u32 value;
3702         int status = 0;
3703         unsigned long end_jiffies;
3704
3705         /* Clear all the entries in the routing table. */
3706         status = ql_clear_routing_entries(qdev);
3707         if (status) {
3708                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3709                 return status;
3710         }
3711
3712         /* If the ASIC recovery bit is set, skip the mailbox command and
3713          * clear the bit; otherwise we are in the normal reset process.
3714          */
3715         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3716                 /* Stop management traffic. */
3717                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3718
3719                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3720                 ql_wait_fifo_empty(qdev);
3721         } else {
3722                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3723         }
3724
3725         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3726
3727         end_jiffies = jiffies + usecs_to_jiffies(30);
3728         do {
3729                 value = ql_read32(qdev, RST_FO);
3730                 if ((value & RST_FO_FR) == 0)
3731                         break;
3732                 cpu_relax();
3733         } while (time_before(jiffies, end_jiffies));
3734
3735         if (value & RST_FO_FR) {
3736                 netif_err(qdev, ifdown, qdev->ndev,
3737                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3738                 status = -ETIMEDOUT;
3739         }
3740
3741         /* Resume management traffic. */
3742         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3743         return status;
3744 }
3745
3746 static void ql_display_dev_info(struct net_device *ndev)
3747 {
3748         struct ql_adapter *qdev = netdev_priv(ndev);
3749
3750         netif_info(qdev, probe, qdev->ndev,
3751                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3752                    "XG Roll = %d, XG Rev = %d.\n",
3753                    qdev->func,
3754                    qdev->port,
3755                    qdev->chip_rev_id & 0x0000000f,
3756                    qdev->chip_rev_id >> 4 & 0x0000000f,
3757                    qdev->chip_rev_id >> 8 & 0x0000000f,
3758                    qdev->chip_rev_id >> 12 & 0x0000000f);
3759         netif_info(qdev, probe, qdev->ndev,
3760                    "MAC address %pM\n", ndev->dev_addr);
3761 }
3762
3763 static int ql_wol(struct ql_adapter *qdev)
3764 {
3765         int status = 0;
3766         u32 wol = MB_WOL_DISABLE;
3767
3768         /* The CAM is still intact after a reset, but if we
3769          * are doing WOL, then we may need to program the
3770          * routing regs. We would also need to issue the mailbox
3771          * commands to instruct the MPI what to do per the ethtool
3772          * settings.
3773          */
3774
3775         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3776                         WAKE_MCAST | WAKE_BCAST)) {
3777                 netif_err(qdev, ifdown, qdev->ndev,
3778                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3779                           qdev->wol);
3780                 return -EINVAL;
3781         }
3782
3783         if (qdev->wol & WAKE_MAGIC) {
3784                 status = ql_mb_wol_set_magic(qdev, 1);
3785                 if (status) {
3786                         netif_err(qdev, ifdown, qdev->ndev,
3787                                   "Failed to set magic packet on %s.\n",
3788                                   qdev->ndev->name);
3789                         return status;
3790                 } else
3791                         netif_info(qdev, drv, qdev->ndev,
3792                                    "Enabled magic packet successfully on %s.\n",
3793                                    qdev->ndev->name);
3794
3795                 wol |= MB_WOL_MAGIC_PKT;
3796         }
3797
3798         if (qdev->wol) {
3799                 wol |= MB_WOL_MODE_ON;
3800                 status = ql_mb_wol_mode(qdev, wol);
3801                 netif_err(qdev, drv, qdev->ndev,
3802                           "WOL %s (wol code 0x%x) on %s\n",
3803                           (status == 0) ? "Successfully set" : "Failed",
3804                           wol, qdev->ndev->name);
3805         }
3806
3807         return status;
3808 }
3809
3810 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3811 {
3812
3813         /* Don't kill the reset worker thread if we
3814          * are in the process of recovery.
3815          */
3816         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3817                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3818         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3819         cancel_delayed_work_sync(&qdev->mpi_work);
3820         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3821         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3822         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3823 }
3824
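/* Quiesce the adapter: drop the link, cancel the worker threads,
 * disable NAPI and interrupts, reclaim any skbs still queued on the TX
 * rings, soft-reset the chip and free the RX buffers.
 */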
3825 static int ql_adapter_down(struct ql_adapter *qdev)
3826 {
3827         int i, status = 0;
3828
3829         ql_link_off(qdev);
3830
3831         ql_cancel_all_work_sync(qdev);
3832
3833         for (i = 0; i < qdev->rss_ring_count; i++)
3834                 napi_disable(&qdev->rx_ring[i].napi);
3835
3836         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3837
3838         ql_disable_interrupts(qdev);
3839
3840         ql_tx_ring_clean(qdev);
3841
3842         /* Call netif_napi_del() from a common point.
3843          */
3844         for (i = 0; i < qdev->rss_ring_count; i++)
3845                 netif_napi_del(&qdev->rx_ring[i].napi);
3846
3847         status = ql_adapter_reset(qdev);
3848         if (status)
3849                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3850                           qdev->func);
3851         ql_free_rx_buffers(qdev);
3852
3853         return status;
3854 }
3855
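/* Bring the adapter (back) up: program the chip, repost RX buffers,
 * restore carrier, multicast and VLAN state, then re-enable interrupts
 * and restart the TX queues.
 */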
3856 static int ql_adapter_up(struct ql_adapter *qdev)
3857 {
3858         int err = 0;
3859
3860         err = ql_adapter_initialize(qdev);
3861         if (err) {
3862                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3863                 goto err_init;
3864         }
3865         set_bit(QL_ADAPTER_UP, &qdev->flags);
3866         ql_alloc_rx_buffers(qdev);
3867         /* If the port is initialized and the
3868          * link is up, then turn on the carrier.
3869          */
3870         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3871             (ql_read32(qdev, STS) & qdev->port_link_up))
3872                 ql_link_on(qdev);
3873         /* Restore rx mode. */
3874         clear_bit(QL_ALLMULTI, &qdev->flags);
3875         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3876         qlge_set_multicast_list(qdev->ndev);
3877
3878         /* Restore vlan setting. */
3879         qlge_restore_vlan(qdev);
3880
3881         ql_enable_interrupts(qdev);
3882         ql_enable_all_completion_interrupts(qdev);
3883         netif_tx_start_all_queues(qdev->ndev);
3884
3885         return 0;
3886 err_init:
3887         ql_adapter_reset(qdev);
3888         return err;
3889 }
3890
3891 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3892 {
3893         ql_free_mem_resources(qdev);
3894         ql_free_irq(qdev);
3895 }
3896
3897 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3898 {
3899         int status = 0;
3900
3901         if (ql_alloc_mem_resources(qdev)) {
3902                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3903                 return -ENOMEM;
3904         }
3905         status = ql_request_irq(qdev);
3906         return status;
3907 }
3908
3909 static int qlge_close(struct net_device *ndev)
3910 {
3911         struct ql_adapter *qdev = netdev_priv(ndev);
3912         int i;
3913
3914         /* If we hit the pci_channel_io_perm_failure
3915          * condition, then we already
3916          * brought the adapter down.
3917          */
3918         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3919                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3920                 clear_bit(QL_EEH_FATAL, &qdev->flags);
3921                 return 0;
3922         }
3923
3924         /*
3925          * Wait for device to recover from a reset.
3926          * (Rarely happens, but possible.)
3927          */
3928         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3929                 msleep(1);
3930
3931         /* Make sure refill_work doesn't re-enable napi */
3932         for (i = 0; i < qdev->rss_ring_count; i++)
3933                 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3934
3935         ql_adapter_down(qdev);
3936         ql_release_adapter_resources(qdev);
3937         return 0;
3938 }
3939
3940 static void qlge_set_lb_size(struct ql_adapter *qdev)
3941 {
3942         if (qdev->ndev->mtu <= 1500)
3943                 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3944         else
3945                 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3946         qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3947 }
3948
3949 static int ql_configure_rings(struct ql_adapter *qdev)
3950 {
3951         int i;
3952         struct rx_ring *rx_ring;
3953         struct tx_ring *tx_ring;
3954         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3955
3956         /* In a perfect world we have one RSS ring for each CPU
3957          * and each has its own vector.  To do that we ask for
3958          * cpu_cnt vectors.  ql_enable_msix() will adjust the
3959          * vector count to what we actually get.  We then
3960          * allocate an RSS ring for each.
3961          * Essentially, we are doing min(cpu_count, msix_vector_count).
3962          */
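             /*
              * For example (illustrative numbers): with 8 online CPUs and
              * MAX_CPUS >= 8, but only 5 MSI-X vectors granted by
              * ql_enable_msix(), the result is:
              *   intr_count     = 5    rss_ring_count = 5
              *   tx_ring_count  = 8    rx_ring_count  = 13
              * Completion queues 0..4 serve the RSS rings and 5..12 serve
              * the tx completion rings (cq_id = rss_ring_count + wq_id).
              */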
3963         qdev->intr_count = cpu_cnt;
3964         ql_enable_msix(qdev);
3965         /* Adjust the RSS ring count to the actual vector count. */
3966         qdev->rss_ring_count = qdev->intr_count;
3967         qdev->tx_ring_count = cpu_cnt;
3968         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3969
3970         for (i = 0; i < qdev->tx_ring_count; i++) {
3971                 tx_ring = &qdev->tx_ring[i];
3972                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3973                 tx_ring->qdev = qdev;
3974                 tx_ring->wq_id = i;
3975                 tx_ring->wq_len = qdev->tx_ring_size;
3976                 tx_ring->wq_size =
3977                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3978
3979                 /*
3980                  * The completion queue IDs for the tx rings start
3981                  * immediately after the rss rings.
3982                  */
3983                 tx_ring->cq_id = qdev->rss_ring_count + i;
3984         }
3985
3986         for (i = 0; i < qdev->rx_ring_count; i++) {
3987                 rx_ring = &qdev->rx_ring[i];
3988                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3989                 rx_ring->qdev = qdev;
3990                 rx_ring->cq_id = i;
3991                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
3992                 if (i < qdev->rss_ring_count) {
3993                         /*
3994                          * Inbound (RSS) queues.
3995                          */
3996                         rx_ring->cq_len = qdev->rx_ring_size;
3997                         rx_ring->cq_size =
3998                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3999                         rx_ring->lbq.type = QLGE_LB;
4000                         rx_ring->sbq.type = QLGE_SB;
4001                         INIT_DELAYED_WORK(&rx_ring->refill_work,
4002                                           &qlge_slow_refill);
4003                 } else {
4004                         /*
4005                          * Outbound queue handles outbound completions only.
4006                          */
4007                         /* The outbound cq is the same size as the tx_ring it services. */
4008                         rx_ring->cq_len = qdev->tx_ring_size;
4009                         rx_ring->cq_size =
4010                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4011                 }
4012         }
4013         return 0;
4014 }
4015
4016 static int qlge_open(struct net_device *ndev)
4017 {
4018         int err = 0;
4019         struct ql_adapter *qdev = netdev_priv(ndev);
4020
4021         err = ql_adapter_reset(qdev);
4022         if (err)
4023                 return err;
4024
4025         qlge_set_lb_size(qdev);
4026         err = ql_configure_rings(qdev);
4027         if (err)
4028                 return err;
4029
4030         err = ql_get_adapter_resources(qdev);
4031         if (err)
4032                 goto error_up;
4033
4034         err = ql_adapter_up(qdev);
4035         if (err)
4036                 goto error_up;
4037
4038         return err;
4039
4040 error_up:
4041         ql_release_adapter_resources(qdev);
4042         return err;
4043 }
4044
4045 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4046 {
4047         int status;
4048
4049         /* Wait for an outstanding reset to complete. */
4050         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4051                 int i = 4;
4052
4053                 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4054                         netif_err(qdev, ifup, qdev->ndev,
4055                                   "Waiting for adapter UP...\n");
4056                         ssleep(1);
4057                 }
4058
4059                 if (!i) {
4060                         netif_err(qdev, ifup, qdev->ndev,
4061                                   "Timed out waiting for adapter UP\n");
4062                         return -ETIMEDOUT;
4063                 }
4064         }
4065
4066         status = ql_adapter_down(qdev);
4067         if (status)
4068                 goto error;
4069
4070         qlge_set_lb_size(qdev);
4071
4072         status = ql_adapter_up(qdev);
4073         if (status)
4074                 goto error;
4075
4076         return status;
4077 error:
4078         netif_alert(qdev, ifup, qdev->ndev,
4079                     "Driver up/down cycle failed, closing device.\n");
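             /*
              * Mark the adapter UP so that qlge_close(), invoked via
              * dev_close() below, does not block waiting for QL_ADAPTER_UP.
              */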
4080         set_bit(QL_ADAPTER_UP, &qdev->flags);
4081         dev_close(qdev->ndev);
4082         return status;
4083 }
4084
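/*
 * Only a transition between the two supported MTUs (1500 and 9000) is
 * accepted; anything else returns -EINVAL.  For example, a request such
 * as "ip link set <dev> mtu 9000" (interface name illustrative) on an
 * interface currently at 1500 schedules mpi_port_cfg_work and, if the
 * interface is running, performs a full down/up cycle through
 * ql_change_rx_buffers() to switch to the jumbo receive buffer size.
 */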
4085 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4086 {
4087         struct ql_adapter *qdev = netdev_priv(ndev);
4088         int status;
4089
4090         if (ndev->mtu == 1500 && new_mtu == 9000)
4091                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4092         else if (ndev->mtu == 9000 && new_mtu == 1500)
4093                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4094         else
4095                 return -EINVAL;
4096
4097         queue_delayed_work(qdev->workqueue,
4098                            &qdev->mpi_port_cfg_work, 3 * HZ);
4099
4100         ndev->mtu = new_mtu;
4101
4102         if (!netif_running(qdev->ndev))
4103                 return 0;
4104
4105         status = ql_change_rx_buffers(qdev);
4106         if (status) {
4107                 netif_err(qdev, ifup, qdev->ndev,
4108                           "Changing MTU failed.\n");
4109         }
4110
4111         return status;
4112 }
4113
4114 static struct net_device_stats *qlge_get_stats(struct net_device
4115                                                *ndev)
4116 {
4117         struct ql_adapter *qdev = netdev_priv(ndev);
4118         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4119         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4120         unsigned long pkts, mcast, dropped, errors, bytes;
4121         int i;
4122
4123         /* Get RX stats. */
4124         pkts = mcast = dropped = errors = bytes = 0;
4125         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4126                 pkts += rx_ring->rx_packets;
4127                 bytes += rx_ring->rx_bytes;
4128                 dropped += rx_ring->rx_dropped;
4129                 errors += rx_ring->rx_errors;
4130                 mcast += rx_ring->rx_multicast;
4131         }
4132         ndev->stats.rx_packets = pkts;
4133         ndev->stats.rx_bytes = bytes;
4134         ndev->stats.rx_dropped = dropped;
4135         ndev->stats.rx_errors = errors;
4136         ndev->stats.multicast = mcast;
4137
4138         /* Get TX stats. */
4139         pkts = errors = bytes = 0;
4140         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4141                 pkts += tx_ring->tx_packets;
4142                 bytes += tx_ring->tx_bytes;
4143                 errors += tx_ring->tx_errors;
4144         }
4145         ndev->stats.tx_packets = pkts;
4146         ndev->stats.tx_bytes = bytes;
4147         ndev->stats.tx_errors = errors;
4148         return &ndev->stats;
4149 }
4150
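/*
 * Rx-mode handler: the routing registers are only reprogrammed on
 * transitions, which are tracked by the QL_PROMISCUOUS and QL_ALLMULTI
 * flags.  If more multicast addresses are requested than
 * MAX_MULTICAST_ENTRIES, the driver falls back to all-multicast mode
 * instead of loading individual multicast MAC entries.
 */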
4151 static void qlge_set_multicast_list(struct net_device *ndev)
4152 {
4153         struct ql_adapter *qdev = netdev_priv(ndev);
4154         struct netdev_hw_addr *ha;
4155         int i, status;
4156
4157         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4158         if (status)
4159                 return;
4160         /*
4161          * Set or clear promiscuous mode if a
4162          * transition is taking place.
4163          */
4164         if (ndev->flags & IFF_PROMISC) {
4165                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4166                         if (ql_set_routing_reg
4167                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4168                                 netif_err(qdev, hw, qdev->ndev,
4169                                           "Failed to set promiscuous mode.\n");
4170                         } else {
4171                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4172                         }
4173                 }
4174         } else {
4175                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4176                         if (ql_set_routing_reg
4177                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4178                                 netif_err(qdev, hw, qdev->ndev,
4179                                           "Failed to clear promiscuous mode.\n");
4180                         } else {
4181                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4182                         }
4183                 }
4184         }
4185
4186         /*
4187          * Set or clear all multicast mode if a
4188          * transition is taking place.
4189          */
4190         if ((ndev->flags & IFF_ALLMULTI) ||
4191             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4192                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4193                         if (ql_set_routing_reg
4194                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4195                                 netif_err(qdev, hw, qdev->ndev,
4196                                           "Failed to set all-multi mode.\n");
4197                         } else {
4198                                 set_bit(QL_ALLMULTI, &qdev->flags);
4199                         }
4200                 }
4201         } else {
4202                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4203                         if (ql_set_routing_reg
4204                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4205                                 netif_err(qdev, hw, qdev->ndev,
4206                                           "Failed to clear all-multi mode.\n");
4207                         } else {
4208                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4209                         }
4210                 }
4211         }
4212
4213         if (!netdev_mc_empty(ndev)) {
4214                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4215                 if (status)
4216                         goto exit;
4217                 i = 0;
4218                 netdev_for_each_mc_addr(ha, ndev) {
4219                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4220                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4221                                 netif_err(qdev, hw, qdev->ndev,
4222                                           "Failed to load multicast address.\n");
4223                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4224                                 goto exit;
4225                         }
4226                         i++;
4227                 }
4228                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4229                 if (ql_set_routing_reg
4230                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4231                         netif_err(qdev, hw, qdev->ndev,
4232                                   "Failed to set multicast match mode.\n");
4233                 } else {
4234                         set_bit(QL_ALLMULTI, &qdev->flags);
4235                 }
4236         }
4237 exit:
4238         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4239 }
4240
4241 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4242 {
4243         struct ql_adapter *qdev = netdev_priv(ndev);
4244         struct sockaddr *addr = p;
4245         int status;
4246
4247         if (!is_valid_ether_addr(addr->sa_data))
4248                 return -EADDRNOTAVAIL;
4249         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4250         /* Update local copy of current mac address. */
4251         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4252
4253         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4254         if (status)
4255                 return status;
4256         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4257                                      MAC_ADDR_TYPE_CAM_MAC,
4258                                      qdev->func * MAX_CQ);
4259         if (status)
4260                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4261         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4262         return status;
4263 }
4264
4265 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4266 {
4267         struct ql_adapter *qdev = netdev_priv(ndev);
4268         ql_queue_asic_error(qdev);
4269 }
4270
4271 static void ql_asic_reset_work(struct work_struct *work)
4272 {
4273         struct ql_adapter *qdev =
4274             container_of(work, struct ql_adapter, asic_reset_work.work);
4275         int status;
4276         rtnl_lock();
4277         status = ql_adapter_down(qdev);
4278         if (status)
4279                 goto error;
4280
4281         status = ql_adapter_up(qdev);
4282         if (status)
4283                 goto error;
4284
4285         /* Restore rx mode. */
4286         clear_bit(QL_ALLMULTI, &qdev->flags);
4287         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4288         qlge_set_multicast_list(qdev->ndev);
4289
4290         rtnl_unlock();
4291         return;
4292 error:
4293         netif_alert(qdev, ifup, qdev->ndev,
4294                     "Driver up/down cycle failed, closing device\n");
4295
4296         set_bit(QL_ADAPTER_UP, &qdev->flags);
4297         dev_close(qdev->ndev);
4298         rtnl_unlock();
4299 }
4300
4301 static const struct nic_operations qla8012_nic_ops = {
4302         .get_flash              = ql_get_8012_flash_params,
4303         .port_initialize        = ql_8012_port_initialize,
4304 };
4305
4306 static const struct nic_operations qla8000_nic_ops = {
4307         .get_flash              = ql_get_8000_flash_params,
4308         .port_initialize        = ql_8000_port_initialize,
4309 };
4310
4311 /* Find the pcie function number for the other NIC
4312  * on this chip.  Since both NIC functions share a
4313  * common firmware we have the lowest enabled function
4314  * do any common work.  Examples would be resetting
4315  * after a fatal firmware error, or doing a firmware
4316  * coredump.
4317  */
4318 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4319 {
4320         int status = 0;
4321         u32 temp;
4322         u32 nic_func1, nic_func2;
4323
4324         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4325                                  &temp);
4326         if (status)
4327                 return status;
4328
4329         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4330                         MPI_TEST_NIC_FUNC_MASK);
4331         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4332                         MPI_TEST_NIC_FUNC_MASK);
4333
4334         if (qdev->func == nic_func1)
4335                 qdev->alt_func = nic_func2;
4336         else if (qdev->func == nic_func2)
4337                 qdev->alt_func = nic_func1;
4338         else
4339                 status = -EIO;
4340
4341         return status;
4342 }
4343
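/*
 * Determine which NIC PCI function this instance is and derive the
 * per-port resources from it.  For example, if the MPI test register
 * reports NIC functions 1 and 2 and this device is function 1, then
 * alt_func = 2 and port = 0, which selects the XGMAC0 semaphore, the
 * STS_PL0/STS_PI0 status bits and the function-0 mailbox addresses.
 */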
4344 static int ql_get_board_info(struct ql_adapter *qdev)
4345 {
4346         int status;
4347         qdev->func =
4348             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4349         if (qdev->func > 3)
4350                 return -EIO;
4351
4352         status = ql_get_alt_pcie_func(qdev);
4353         if (status)
4354                 return status;
4355
4356         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4357         if (qdev->port) {
4358                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4359                 qdev->port_link_up = STS_PL1;
4360                 qdev->port_init = STS_PI1;
4361                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4362                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4363         } else {
4364                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4365                 qdev->port_link_up = STS_PL0;
4366                 qdev->port_init = STS_PI0;
4367                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4368                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4369         }
4370         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4371         qdev->device_id = qdev->pdev->device;
4372         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4373                 qdev->nic_ops = &qla8012_nic_ops;
4374         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4375                 qdev->nic_ops = &qla8000_nic_ops;
4376         return status;
4377 }
4378
4379 static void ql_release_all(struct pci_dev *pdev)
4380 {
4381         struct net_device *ndev = pci_get_drvdata(pdev);
4382         struct ql_adapter *qdev = netdev_priv(ndev);
4383
4384         if (qdev->workqueue) {
4385                 destroy_workqueue(qdev->workqueue);
4386                 qdev->workqueue = NULL;
4387         }
4388
4389         if (qdev->reg_base)
4390                 iounmap(qdev->reg_base);
4391         if (qdev->doorbell_area)
4392                 iounmap(qdev->doorbell_area);
4393         vfree(qdev->mpi_coredump);
4394         pci_release_regions(pdev);
4395 }
4396
4397 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4398                           int cards_found)
4399 {
4400         struct ql_adapter *qdev = netdev_priv(ndev);
4401         int err = 0;
4402
4403         memset((void *)qdev, 0, sizeof(*qdev));
4404         err = pci_enable_device(pdev);
4405         if (err) {
4406                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4407                 return err;
4408         }
4409
4410         qdev->ndev = ndev;
4411         qdev->pdev = pdev;
4412         pci_set_drvdata(pdev, ndev);
4413
4414         /* Set PCIe read request size */
4415         err = pcie_set_readrq(pdev, 4096);
4416         if (err) {
4417                 dev_err(&pdev->dev, "Set readrq failed.\n");
4418                 goto err_out1;
4419         }
4420
4421         err = pci_request_regions(pdev, DRV_NAME);
4422         if (err) {
4423                 dev_err(&pdev->dev, "PCI region request failed.\n");
4424                 goto err_out1;
4425         }
4426
4427         pci_set_master(pdev);
4428         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4429                 set_bit(QL_DMA64, &qdev->flags);
4430                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4431         } else {
4432                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4433                 if (!err)
4434                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4435         }
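             /*
              * Equivalent sketch using the generic DMA API of newer kernels
              * (illustrative only, not what this staging driver does):
              *
              *      err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
              *      if (!err)
              *              set_bit(QL_DMA64, &qdev->flags);
              *      else
              *              err = dma_set_mask_and_coherent(&pdev->dev,
              *                                              DMA_BIT_MASK(32));
              */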
4436
4437         if (err) {
4438                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4439                 goto err_out2;
4440         }
4441
4442         /* Set PCIe reset type for EEH to fundamental. */
4443         pdev->needs_freset = 1;
4444         pci_save_state(pdev);
4445         qdev->reg_base =
4446             ioremap(pci_resource_start(pdev, 1),
4447                             pci_resource_len(pdev, 1));
4448         if (!qdev->reg_base) {
4449                 dev_err(&pdev->dev, "Register mapping failed.\n");
4450                 err = -ENOMEM;
4451                 goto err_out2;
4452         }
4453
4454         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4455         qdev->doorbell_area =
4456             ioremap(pci_resource_start(pdev, 3),
4457                             pci_resource_len(pdev, 3));
4458         if (!qdev->doorbell_area) {
4459                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4460                 err = -ENOMEM;
4461                 goto err_out2;
4462         }
4463
4464         err = ql_get_board_info(qdev);
4465         if (err) {
4466                 dev_err(&pdev->dev, "Register access failed.\n");
4467                 err = -EIO;
4468                 goto err_out2;
4469         }
4470         qdev->msg_enable = netif_msg_init(debug, default_msg);
4471         spin_lock_init(&qdev->stats_lock);
4472
4473         if (qlge_mpi_coredump) {
4474                 qdev->mpi_coredump =
4475                         vmalloc(sizeof(struct ql_mpi_coredump));
4476                 if (!qdev->mpi_coredump) {
4477                         err = -ENOMEM;
4478                         goto err_out2;
4479                 }
4480                 if (qlge_force_coredump)
4481                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4482         }
4483         /* make sure the EEPROM is good */
4484         err = qdev->nic_ops->get_flash(qdev);
4485         if (err) {
4486                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4487                 goto err_out2;
4488         }
4489
4490         /* Keep local copy of current mac address. */
4491         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4492
4493         /* Set up the default ring sizes. */
4494         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4495         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4496
4497         /* Set up the coalescing parameters. */
4498         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4499         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4500         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4501         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4502
4503         /*
4504          * Set up the operating parameters.
4505          */
4506         qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4507                                                   ndev->name);
4508         if (!qdev->workqueue) {
4509                 err = -ENOMEM;
4510                 goto err_out2;
4511         }
4512
4513         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4514         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4515         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4516         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4517         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4518         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4519         init_completion(&qdev->ide_completion);
4520         mutex_init(&qdev->mpi_mutex);
4521
4522         if (!cards_found) {
4523                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4524                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4525                          DRV_NAME, DRV_VERSION);
4526         }
4527         return 0;
4528 err_out2:
4529         ql_release_all(pdev);
4530 err_out1:
4531         pci_disable_device(pdev);
4532         return err;
4533 }
4534
4535 static const struct net_device_ops qlge_netdev_ops = {
4536         .ndo_open               = qlge_open,
4537         .ndo_stop               = qlge_close,
4538         .ndo_start_xmit         = qlge_send,
4539         .ndo_change_mtu         = qlge_change_mtu,
4540         .ndo_get_stats          = qlge_get_stats,
4541         .ndo_set_rx_mode        = qlge_set_multicast_list,
4542         .ndo_set_mac_address    = qlge_set_mac_address,
4543         .ndo_validate_addr      = eth_validate_addr,
4544         .ndo_tx_timeout         = qlge_tx_timeout,
4545         .ndo_set_features       = qlge_set_features,
4546         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4547         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4548 };
4549
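/*
 * Deferrable watchdog armed from qlge_probe() and re-armed every five
 * seconds.  The periodic STS read gives the EEH machinery a chance to
 * notice a dead bus; once the PCI channel is reported offline the timer
 * logs the status value and is simply not re-armed.
 */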
4550 static void ql_timer(struct timer_list *t)
4551 {
4552         struct ql_adapter *qdev = from_timer(qdev, t, timer);
4553         u32 var = 0;
4554
4555         var = ql_read32(qdev, STS);
4556         if (pci_channel_offline(qdev->pdev)) {
4557                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4558                 return;
4559         }
4560
4561         mod_timer(&qdev->timer, jiffies + (5*HZ));
4562 }
4563
4564 static int qlge_probe(struct pci_dev *pdev,
4565                       const struct pci_device_id *pci_entry)
4566 {
4567         struct net_device *ndev = NULL;
4568         struct ql_adapter *qdev = NULL;
4569         static int cards_found;
4570         int err = 0;
4571
4572         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4573                                  min(MAX_CPUS,
4574                                      netif_get_num_default_rss_queues()));
4575         if (!ndev)
4576                 return -ENOMEM;
4577
4578         err = ql_init_device(pdev, ndev, cards_found);
4579         if (err < 0) {
4580                 free_netdev(ndev);
4581                 return err;
4582         }
4583
4584         qdev = netdev_priv(ndev);
4585         SET_NETDEV_DEV(ndev, &pdev->dev);
4586         ndev->hw_features = NETIF_F_SG |
4587                             NETIF_F_IP_CSUM |
4588                             NETIF_F_TSO |
4589                             NETIF_F_TSO_ECN |
4590                             NETIF_F_HW_VLAN_CTAG_TX |
4591                             NETIF_F_HW_VLAN_CTAG_RX |
4592                             NETIF_F_HW_VLAN_CTAG_FILTER |
4593                             NETIF_F_RXCSUM;
4594         ndev->features = ndev->hw_features;
4595         ndev->vlan_features = ndev->hw_features;
4596         /* vlan gets the same features, minus the vlan offloads themselves */
4597         ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4598                                  NETIF_F_HW_VLAN_CTAG_TX |
4599                                  NETIF_F_HW_VLAN_CTAG_RX);
4600
4601         if (test_bit(QL_DMA64, &qdev->flags))
4602                 ndev->features |= NETIF_F_HIGHDMA;
4603
4604         /*
4605          * Set up net_device structure.
4606          */
4607         ndev->tx_queue_len = qdev->tx_ring_size;
4608         ndev->irq = pdev->irq;
4609
4610         ndev->netdev_ops = &qlge_netdev_ops;
4611         ndev->ethtool_ops = &qlge_ethtool_ops;
4612         ndev->watchdog_timeo = 10 * HZ;
4613
4614         /* MTU range: this driver only supports 1500 or 9000, so this only
4615          * filters out values above or below, and we'll rely on
4616          * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4617          */
4618         ndev->min_mtu = ETH_DATA_LEN;
4619         ndev->max_mtu = 9000;
4620
4621         err = register_netdev(ndev);
4622         if (err) {
4623                 dev_err(&pdev->dev, "net device registration failed.\n");
4624                 ql_release_all(pdev);
4625                 pci_disable_device(pdev);
4626                 free_netdev(ndev);
4627                 return err;
4628         }
4629         /* Start up the timer to trigger EEH if
4630          * the bus goes dead
4631          */
4632         timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4633         mod_timer(&qdev->timer, jiffies + (5*HZ));
4634         ql_link_off(qdev);
4635         ql_display_dev_info(ndev);
4636         atomic_set(&qdev->lb_count, 0);
4637         cards_found++;
4638         return 0;
4639 }
4640
4641 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4642 {
4643         return qlge_send(skb, ndev);
4644 }
4645
4646 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4647 {
4648         return ql_clean_inbound_rx_ring(rx_ring, budget);
4649 }
4650
4651 static void qlge_remove(struct pci_dev *pdev)
4652 {
4653         struct net_device *ndev = pci_get_drvdata(pdev);
4654         struct ql_adapter *qdev = netdev_priv(ndev);
4655         del_timer_sync(&qdev->timer);
4656         ql_cancel_all_work_sync(qdev);
4657         unregister_netdev(ndev);
4658         ql_release_all(pdev);
4659         pci_disable_device(pdev);
4660         free_netdev(ndev);
4661 }
4662
4663 /* Clean up resources without touching hardware. */
4664 static void ql_eeh_close(struct net_device *ndev)
4665 {
4666         int i;
4667         struct ql_adapter *qdev = netdev_priv(ndev);
4668
4669         if (netif_carrier_ok(ndev)) {
4670                 netif_carrier_off(ndev);
4671                 netif_stop_queue(ndev);
4672         }
4673
4674         /* Disabling the timer */
4675         ql_cancel_all_work_sync(qdev);
4676
4677         for (i = 0; i < qdev->rss_ring_count; i++)
4678                 netif_napi_del(&qdev->rx_ring[i].napi);
4679
4680         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4681         ql_tx_ring_clean(qdev);
4682         ql_free_rx_buffers(qdev);
4683         ql_release_adapter_resources(qdev);
4684 }
4685
4686 /*
4687  * This callback is called by the PCI subsystem whenever
4688  * a PCI bus error is detected.
4689  */
4690 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4691                                                enum pci_channel_state state)
4692 {
4693         struct net_device *ndev = pci_get_drvdata(pdev);
4694         struct ql_adapter *qdev = netdev_priv(ndev);
4695
4696         switch (state) {
4697         case pci_channel_io_normal:
4698                 return PCI_ERS_RESULT_CAN_RECOVER;
4699         case pci_channel_io_frozen:
4700                 netif_device_detach(ndev);
4701                 del_timer_sync(&qdev->timer);
4702                 if (netif_running(ndev))
4703                         ql_eeh_close(ndev);
4704                 pci_disable_device(pdev);
4705                 return PCI_ERS_RESULT_NEED_RESET;
4706         case pci_channel_io_perm_failure:
4707                 dev_err(&pdev->dev,
4708                         "%s: pci_channel_io_perm_failure.\n", __func__);
4709                 del_timer_sync(&qdev->timer);
4710                 ql_eeh_close(ndev);
4711                 set_bit(QL_EEH_FATAL, &qdev->flags);
4712                 return PCI_ERS_RESULT_DISCONNECT;
4713         }
4714
4715         /* Request a slot reset. */
4716         return PCI_ERS_RESULT_NEED_RESET;
4717 }
4718
4719 /*
4720  * This callback is called after the PCI bus has been reset.
4721  * Basically, this tries to restart the card from scratch.
4722  * This is a shortened version of the device probe/discovery code,
4723  * it resembles the first half of the probe routine.
4724  */
4725 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4726 {
4727         struct net_device *ndev = pci_get_drvdata(pdev);
4728         struct ql_adapter *qdev = netdev_priv(ndev);
4729
4730         pdev->error_state = pci_channel_io_normal;
4731
4732         pci_restore_state(pdev);
4733         if (pci_enable_device(pdev)) {
4734                 netif_err(qdev, ifup, qdev->ndev,
4735                           "Cannot re-enable PCI device after reset.\n");
4736                 return PCI_ERS_RESULT_DISCONNECT;
4737         }
4738         pci_set_master(pdev);
4739
4740         if (ql_adapter_reset(qdev)) {
4741                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4742                 set_bit(QL_EEH_FATAL, &qdev->flags);
4743                 return PCI_ERS_RESULT_DISCONNECT;
4744         }
4745
4746         return PCI_ERS_RESULT_RECOVERED;
4747 }
4748
4749 static void qlge_io_resume(struct pci_dev *pdev)
4750 {
4751         struct net_device *ndev = pci_get_drvdata(pdev);
4752         struct ql_adapter *qdev = netdev_priv(ndev);
4753         int err = 0;
4754
4755         if (netif_running(ndev)) {
4756                 err = qlge_open(ndev);
4757                 if (err) {
4758                         netif_err(qdev, ifup, qdev->ndev,
4759                                   "Device initialization failed after reset.\n");
4760                         return;
4761                 }
4762         } else {
4763                 netif_err(qdev, ifup, qdev->ndev,
4764                           "Device was not running prior to EEH.\n");
4765         }
4766         mod_timer(&qdev->timer, jiffies + (5*HZ));
4767         netif_device_attach(ndev);
4768 }
4769
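/*
 * PCI error recovery flow: the PCI core calls .error_detected first;
 * when that returns PCI_ERS_RESULT_NEED_RESET the slot is reset and
 * .slot_reset re-initializes the adapter, after which .resume reopens
 * the interface if it was running before the error.
 */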
4770 static const struct pci_error_handlers qlge_err_handler = {
4771         .error_detected = qlge_io_error_detected,
4772         .slot_reset = qlge_io_slot_reset,
4773         .resume = qlge_io_resume,
4774 };
4775
4776 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4777 {
4778         struct net_device *ndev = pci_get_drvdata(pdev);
4779         struct ql_adapter *qdev = netdev_priv(ndev);
4780         int err;
4781
4782         netif_device_detach(ndev);
4783         del_timer_sync(&qdev->timer);
4784
4785         if (netif_running(ndev)) {
4786                 err = ql_adapter_down(qdev);
4787                 if (err)
4788                         return err;
4789         }
4790
4791         ql_wol(qdev);
4792         err = pci_save_state(pdev);
4793         if (err)
4794                 return err;
4795
4796         pci_disable_device(pdev);
4797
4798         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4799
4800         return 0;
4801 }
4802
4803 #ifdef CONFIG_PM
4804 static int qlge_resume(struct pci_dev *pdev)
4805 {
4806         struct net_device *ndev = pci_get_drvdata(pdev);
4807         struct ql_adapter *qdev = netdev_priv(ndev);
4808         int err;
4809
4810         pci_set_power_state(pdev, PCI_D0);
4811         pci_restore_state(pdev);
4812         err = pci_enable_device(pdev);
4813         if (err) {
4814                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4815                 return err;
4816         }
4817         pci_set_master(pdev);
4818
4819         pci_enable_wake(pdev, PCI_D3hot, 0);
4820         pci_enable_wake(pdev, PCI_D3cold, 0);
4821
4822         if (netif_running(ndev)) {
4823                 err = ql_adapter_up(qdev);
4824                 if (err)
4825                         return err;
4826         }
4827
4828         mod_timer(&qdev->timer, jiffies + (5*HZ));
4829         netif_device_attach(ndev);
4830
4831         return 0;
4832 }
4833 #endif /* CONFIG_PM */
4834
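/*
 * Shutdown reuses the suspend path: quiesce the adapter and let
 * ql_wol() arm wake-on-LAN before the system powers off.
 */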
4835 static void qlge_shutdown(struct pci_dev *pdev)
4836 {
4837         qlge_suspend(pdev, PMSG_SUSPEND);
4838 }
4839
4840 static struct pci_driver qlge_driver = {
4841         .name = DRV_NAME,
4842         .id_table = qlge_pci_tbl,
4843         .probe = qlge_probe,
4844         .remove = qlge_remove,
4845 #ifdef CONFIG_PM
4846         .suspend = qlge_suspend,
4847         .resume = qlge_resume,
4848 #endif
4849         .shutdown = qlge_shutdown,
4850         .err_handler = &qlge_err_handler
4851 };
4852
4853 module_pci_driver(qlge_driver);