// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                             offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

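/**
 * h_reg_sub_crq - Register a sub-CRQ page with the hypervisor
 * @unit_address: unit address of the VNIC device
 * @token: DMA token (I/O address) of the queue page
 * @length: length of the queue in bytes
 * @number: returned sub-CRQ number assigned by firmware
 * @irq: returned interrupt number assigned to the sub-CRQ
 *
 * Thin wrapper around the H_REG_SUB_CRQ hcall; the assigned queue
 * number and irq are passed back through the hcall return buffer.
 */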
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
        *number = retbuf[0];
        *irq = retbuf[1];

        return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
                                       struct completion *comp_done,
                                       unsigned long timeout)
{
        struct net_device *netdev;
        unsigned long div_timeout;
        u8 retry;

        netdev = adapter->netdev;
        retry = 5;
        div_timeout = msecs_to_jiffies(timeout / retry);
        while (true) {
                if (!adapter->crq.active) {
                        netdev_err(netdev, "Device down!\n");
                        return -ENODEV;
                }
                if (!retry--)
                        break;
                if (wait_for_completion_timeout(comp_done, div_timeout))
                        return 0;
        }
        netdev_err(netdev, "Operation timed out.\n");
        return -ETIMEDOUT;
}

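/**
 * alloc_long_term_buff - Allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: requested buffer size in bytes
 *
 * Allocates a coherent DMA buffer, assigns it the next free map id and
 * sends a REQUEST_MAP to the VNIC server, waiting up to 10 seconds for
 * the firmware response. The buffer is freed again on any failure.
 */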
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
        struct device *dev = &adapter->vdev->dev;
        int rc;

        ltb->size = size;
        ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
                                       GFP_KERNEL);
        if (!ltb->buff) {
                dev_err(dev, "Couldn't alloc long term buffer\n");
                return -ENOMEM;
        }
        ltb->map_id = adapter->map_id;
        adapter->map_id++;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);
        rc = send_request_map(adapter, ltb->addr,
                              ltb->size, ltb->map_id);
        if (rc) {
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev,
                        "Long term map request aborted or timed out, rc = %d\n",
                        rc);
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        if (adapter->fw_done_rc) {
                dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
                        adapter->fw_done_rc);
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                mutex_unlock(&adapter->fw_lock);
                return -1;
        }
        mutex_unlock(&adapter->fw_lock);
        return 0;
}

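/**
 * free_long_term_buff - Unmap and free a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer to release
 *
 * The unmap request is skipped during failover and mobility resets,
 * where the old mappings are no longer valid on the server side.
 */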
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;

        if (!ltb->buff)
                return;

        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_MOBILITY)
                send_request_unmap(adapter, ltb->map_id);
        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

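/**
 * reset_long_term_buff - Zero and re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Clears the buffer contents and re-sends the map request to the VNIC
 * server. If the server rejects the request, the buffer is freed and
 * reallocated from scratch.
 */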
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;
        int rc;

        memset(ltb->buff, 0, ltb->size);

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;

        reinit_completion(&adapter->fw_done);
        rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_info(dev,
                         "Reset failed, long term map request timed out or aborted\n");
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        if (adapter->fw_done_rc) {
                dev_info(dev,
                         "Reset failed, attempting to free and reallocate buffer\n");
                free_long_term_buff(adapter, ltb);
                mutex_unlock(&adapter->fw_lock);
                return alloc_long_term_buff(adapter, ltb, ltb->size);
        }
        mutex_unlock(&adapter->fw_lock);
        return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_active_rx_pools; i++)
                adapter->rx_pool[i].active = 0;
}

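/**
 * replenish_rx_pool - Refill an rx pool's free slots with receive buffers
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * Allocates an skb for each free slot, points the slot at its region of
 * the pool's long term buffer and posts an rx_add sub-CRQ descriptor so
 * the VNIC server can fill the buffer. If a post fails, the slot is
 * returned to the free map; if the queue is closed or a failover is
 * pending, replenishment is disabled and carrier is reported off.
 */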
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
        int count = pool->size - atomic_read(&pool->available);
        u64 handle = adapter->rx_scrq[pool->index]->handle;
        struct device *dev = &adapter->vdev->dev;
        int buffers_added = 0;
        unsigned long lpar_rc;
        union sub_crq sub_crq;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        unsigned char *dst;
        int shift = 0;
        int index;
        int i;

        if (!pool->active)
                return;

        for (i = 0; i < count; ++i) {
                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
                if (!skb) {
                        dev_err(dev, "Couldn't replenish rx buff\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                index = pool->free_map[pool->next_free];

                if (pool->rx_buff[index].skb)
                        dev_err(dev, "Inconsistent free_map!\n");

                /* Copy the skb to the long term mapped DMA buffer */
                offset = index * pool->buff_size;
                dst = pool->long_term_buff.buff + offset;
                memset(dst, 0, pool->buff_size);
                dma_addr = pool->long_term_buff.addr + offset;
                pool->rx_buff[index].data = dst;

                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
                pool->rx_buff[index].dma = dma_addr;
                pool->rx_buff[index].skb = skb;
                pool->rx_buff[index].pool_index = pool->index;
                pool->rx_buff[index].size = pool->buff_size;

                memset(&sub_crq, 0, sizeof(sub_crq));
                sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
                sub_crq.rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[index]);
                sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
                sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
                 * converted to big endian to prevent the last byte from being
                 * truncated.
                 */
#ifdef __LITTLE_ENDIAN__
                shift = 8;
#endif
                sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

                lpar_rc = send_subcrq(adapter, handle, &sub_crq);
                if (lpar_rc != H_SUCCESS)
                        goto failure;

                buffers_added++;
                adapter->replenish_add_buff_success++;
                pool->next_free = (pool->next_free + 1) % pool->size;
        }
        atomic_add(buffers_added, &pool->available);
        return;

failure:
        if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
                dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;

        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;
        atomic_add(buffers_added, &pool->available);

        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                /* Disable buffer pool replenishment and report carrier off if
                 * queue is closed or pending failover.
                 * Firmware guarantees that a signal will be sent to the
                 * driver, triggering a reset.
                 */
                deactivate_rx_pools(adapter);
                netif_carrier_off(adapter->netdev);
        }
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;
        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                if (adapter->rx_pool[i].active)
                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
        }
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->tx_stats_buffers);
        kfree(adapter->rx_stats_buffers);
        adapter->tx_stats_buffers = NULL;
        adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
        adapter->tx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_tx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->tx_stats_buffers)
                return -ENOMEM;

        adapter->rx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_rx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->rx_stats_buffers)
                return -ENOMEM;

        return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;

        if (!adapter->stats_token)
                return;

        dma_unmap_single(dev, adapter->stats_token,
                         sizeof(struct ibmvnic_statistics),
                         DMA_FROM_DEVICE);
        adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t stok;

        stok = dma_map_single(dev, &adapter->stats,
                              sizeof(struct ibmvnic_statistics),
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, stok)) {
                dev_err(dev, "Couldn't map stats buffer\n");
                return -1;
        }

        adapter->stats_token = stok;
        netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
        return 0;
}

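/**
 * reset_rx_pools - Reset all rx pools after an adapter reset
 * @adapter: private device data
 *
 * Re-registers each pool's long term buffer, reallocating it when the
 * required buffer size has changed, and returns the pool bookkeeping
 * (free map, counters, indexes) to its initial state.
 */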
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        u64 buff_size;
        int rx_scrqs;
        int i, j, rc;

        if (!adapter->rx_pool)
                return -1;

        buff_size = adapter->cur_rx_buf_sz;
        rx_scrqs = adapter->num_active_rx_pools;
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

                if (rx_pool->buff_size != buff_size) {
                        free_long_term_buff(adapter, &rx_pool->long_term_buff);
                        rx_pool->buff_size = buff_size;
                        rc = alloc_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff,
                                                  rx_pool->size *
                                                  rx_pool->buff_size);
                } else {
                        rc = reset_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff);
                }

                if (rc)
                        return rc;

                for (j = 0; j < rx_pool->size; j++)
                        rx_pool->free_map[j] = j;

                memset(rx_pool->rx_buff, 0,
                       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
                rx_pool->active = 1;
        }

        return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        int i, j;

        if (!adapter->rx_pool)
                return;

        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

                kfree(rx_pool->free_map);
                free_long_term_buff(adapter, &rx_pool->long_term_buff);

                if (!rx_pool->rx_buff)
                        continue;

                for (j = 0; j < rx_pool->size; j++) {
                        if (rx_pool->rx_buff[j].skb) {
                                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                                rx_pool->rx_buff[j].skb = NULL;
                        }
                }

                kfree(rx_pool->rx_buff);
        }

        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
        adapter->num_active_rx_pools = 0;
}

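/**
 * init_rx_pools - Allocate one rx pool per rx sub-CRQ
 * @netdev: net device to initialize pools for
 *
 * Each pool gets a free map, an rx buffer array and a long term DMA
 * buffer sized for the negotiated number of entries per sub-CRQ. All
 * pools are torn down again if any allocation fails.
 */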
static int init_rx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_rx_pool *rx_pool;
        int rxadd_subcrqs;
        u64 buff_size;
        int i, j;

        rxadd_subcrqs = adapter->num_active_rx_scrqs;
        buff_size = adapter->cur_rx_buf_sz;

        adapter->rx_pool = kcalloc(rxadd_subcrqs,
                                   sizeof(struct ibmvnic_rx_pool),
                                   GFP_KERNEL);
        if (!adapter->rx_pool) {
                dev_err(dev, "Failed to allocate rx pools\n");
                return -1;
        }

        adapter->num_active_rx_pools = rxadd_subcrqs;

        for (i = 0; i < rxadd_subcrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev,
                           "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
                           i, adapter->req_rx_add_entries_per_subcrq,
                           buff_size);

                rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
                rx_pool->index = i;
                rx_pool->buff_size = buff_size;
                rx_pool->active = 1;

                rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
                                            GFP_KERNEL);
                if (!rx_pool->free_map) {
                        release_rx_pools(adapter);
                        return -1;
                }

                rx_pool->rx_buff = kcalloc(rx_pool->size,
                                           sizeof(struct ibmvnic_rx_buff),
                                           GFP_KERNEL);
                if (!rx_pool->rx_buff) {
                        dev_err(dev, "Couldn't alloc rx buffers\n");
                        release_rx_pools(adapter);
                        return -1;
                }

                if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                         rx_pool->size * rx_pool->buff_size)) {
                        release_rx_pools(adapter);
                        return -1;
                }

                for (j = 0; j < rx_pool->size; ++j)
                        rx_pool->free_map[j] = j;

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
        }

        return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
                             struct ibmvnic_tx_pool *tx_pool)
{
        int rc, i;

        rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
        if (rc)
                return rc;

        memset(tx_pool->tx_buff, 0,
               tx_pool->num_buffers *
               sizeof(struct ibmvnic_tx_buff));

        for (i = 0; i < tx_pool->num_buffers; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;

        return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
        int tx_scrqs;
        int i, rc;

        if (!adapter->tx_pool)
                return -1;

        tx_scrqs = adapter->num_active_tx_pools;
        for (i = 0; i < tx_scrqs; i++) {
                rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
                if (rc)
                        return rc;
                rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
                if (rc)
                        return rc;
        }

        return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
        if (!adapter->vpd)
                return;

        kfree(adapter->vpd->buff);
        kfree(adapter->vpd);

        adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_tx_pool *tx_pool)
{
        kfree(tx_pool->tx_buff);
        kfree(tx_pool->free_map);
        free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->tx_pool)
                return;

        for (i = 0; i < adapter->num_active_tx_pools; i++) {
                release_one_tx_pool(adapter, &adapter->tx_pool[i]);
                release_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }

        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
        kfree(adapter->tso_pool);
        adapter->tso_pool = NULL;
        adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
                            struct ibmvnic_tx_pool *tx_pool,
                            int num_entries, int buf_size)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int i;

        tx_pool->tx_buff = kcalloc(num_entries,
                                   sizeof(struct ibmvnic_tx_buff),
                                   GFP_KERNEL);
        if (!tx_pool->tx_buff)
                return -1;

        if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                 num_entries * buf_size))
                return -1;

        tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
        if (!tx_pool->free_map)
                return -1;

        for (i = 0; i < num_entries; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;
        tx_pool->num_buffers = num_entries;
        tx_pool->buf_size = buf_size;

        return 0;
}

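/**
 * init_tx_pools - Allocate the tx and TSO pools for each tx sub-CRQ
 * @netdev: net device to initialize pools for
 *
 * Creates a regular tx pool sized for the negotiated MTU and a
 * companion TSO pool with fixed-size TSO buffers for every tx queue.
 */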
static int init_tx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int tx_subcrqs;
        int i, rc;

        tx_subcrqs = adapter->num_active_tx_scrqs;
        adapter->tx_pool = kcalloc(tx_subcrqs,
                                   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tx_pool)
                return -1;

        adapter->tso_pool = kcalloc(tx_subcrqs,
                                    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tso_pool)
                return -1;

        adapter->num_active_tx_pools = tx_subcrqs;

        for (i = 0; i < tx_subcrqs; i++) {
                rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
                                      adapter->req_tx_entries_per_subcrq,
                                      adapter->req_mtu + VLAN_HLEN);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
                }

                rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
                                      IBMVNIC_TSO_BUFS,
                                      IBMVNIC_TSO_BUF_SZ);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
                }
        }

        return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_enable(&adapter->napi[i]);

        adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
                napi_disable(&adapter->napi[i]);
        }

        adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
        if (!adapter->napi)
                return -ENOMEM;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
                netif_napi_add(adapter->netdev, &adapter->napi[i],
                               ibmvnic_poll, NAPI_POLL_WEIGHT);
        }

        adapter->num_active_rx_napi = adapter->req_rx_queues;
        return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi)
                return;

        for (i = 0; i < adapter->num_active_rx_napi; i++) {
                netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
                netif_napi_del(&adapter->napi[i]);
        }

        kfree(adapter->napi);
        adapter->napi = NULL;
        adapter->num_active_rx_napi = 0;
        adapter->napi_enabled = false;
}

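/**
 * ibmvnic_login - Log in to the VNIC server
 * @netdev: net device to log in
 *
 * Sends a login CRQ and waits for the response, retrying up to ten
 * times on timeout or an aborted login. On a partial success the
 * capabilities are renegotiated and the sub-CRQs reinitialized before
 * the login is retried.
 */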
static int ibmvnic_login(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        int retry_count = 0;
        int retries = 10;
        bool retry;
        int rc;

        do {
                retry = false;
                if (retry_count > retries) {
                        netdev_warn(netdev, "Login attempts exceeded\n");
                        return -1;
                }

                adapter->init_done_rc = 0;
                reinit_completion(&adapter->init_done);
                rc = send_login(adapter);
                if (rc) {
                        netdev_warn(netdev, "Unable to login\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_warn(netdev, "Login timed out, retrying...\n");
                        retry = true;
                        adapter->init_done_rc = 0;
                        retry_count++;
                        continue;
                }

                if (adapter->init_done_rc == ABORTED) {
                        netdev_warn(netdev, "Login aborted, retrying...\n");
                        retry = true;
                        adapter->init_done_rc = 0;
                        retry_count++;
                        /* FW or device may be busy, so
                         * wait a bit before retrying login
                         */
                        msleep(500);
                } else if (adapter->init_done_rc == PARTIALSUCCESS) {
                        retry_count++;
                        release_sub_crqs(adapter, 1);

                        retry = true;
                        netdev_dbg(netdev,
                                   "Received partial success, retrying...\n");
                        adapter->init_done_rc = 0;
                        reinit_completion(&adapter->init_done);
                        send_query_cap(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout)) {
                                netdev_warn(netdev,
                                            "Capabilities query timed out\n");
                                return -1;
                        }

                        rc = init_sub_crqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ initialization failed\n");
                                return -1;
                        }

                        rc = init_sub_crq_irqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ irq initialization failed\n");
                                return -1;
                        }
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Adapter login failed\n");
                        return -1;
                }
        } while (retry);

        __ibmvnic_set_mac(netdev, adapter->mac_addr);

        return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_buf);
        adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_rsp_buf);
        adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
        release_vpd_data(adapter);

        release_tx_pools(adapter);
        release_rx_pools(adapter);

        release_napi(adapter);
        release_login_rsp_buffer(adapter);
}

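/**
 * set_link_state - Set the adapter's logical link state
 * @adapter: private device data
 * @link_state: requested IBMVNIC_LOGICAL_LNK_* state
 *
 * Issues a LOGICAL_LINK_STATE CRQ and waits up to 30 seconds for the
 * response, re-sending the request after a one second delay for as
 * long as the server reports partial success.
 */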
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
        struct net_device *netdev = adapter->netdev;
        unsigned long timeout = msecs_to_jiffies(30000);
        union ibmvnic_crq crq;
        bool resend;
        int rc;

        netdev_dbg(netdev, "setting link state %d\n", link_state);

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = link_state;

        do {
                resend = false;

                reinit_completion(&adapter->init_done);
                rc = ibmvnic_send_crq(adapter, &crq);
                if (rc) {
                        netdev_err(netdev, "Failed to set link state\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_err(netdev, "timeout setting link state\n");
                        return -1;
                }

                if (adapter->init_done_rc == PARTIALSUCCESS) {
                        /* Partial success, delay and re-send */
                        mdelay(1000);
                        resend = true;
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Unable to set link state, rc=%d\n",
                                    adapter->init_done_rc);
                        return adapter->init_done_rc;
                }
        } while (resend);

        return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
                   adapter->req_tx_queues, adapter->req_rx_queues);

        rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
        if (rc) {
                netdev_err(netdev, "failed to set the number of tx queues\n");
                return rc;
        }

        rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
        if (rc)
                netdev_err(netdev, "failed to set the number of rx queues\n");

        return rc;
}

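/**
 * ibmvnic_get_vpd - Retrieve the adapter's Vital Product Data
 * @adapter: private device data
 *
 * Queries the VPD size, (re)allocates and DMA maps a buffer to hold
 * it, then asks the VNIC server to fill the buffer. Each firmware
 * response is waited for with a 10 second timeout.
 */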
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
        int len = 0;
        int rc;

        if (adapter->vpd->buff)
                len = adapter->vpd->len;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd_size.cmd = GET_VPD_SIZE;
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }
        mutex_unlock(&adapter->fw_lock);

        if (!adapter->vpd->len)
                return -ENODATA;

        if (!adapter->vpd->buff) {
                adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
        } else if (adapter->vpd->len != len) {
                /* Reallocate via a temporary so the old buffer is not
                 * leaked if krealloc() fails.
                 */
                void *tmp = krealloc(adapter->vpd->buff,
                                     adapter->vpd->len, GFP_KERNEL);

                if (!tmp) {
                        dev_err(dev, "Could not reallocate VPD buffer\n");
                        return -ENOMEM;
                }
                adapter->vpd->buff = tmp;
        }

        if (!adapter->vpd->buff) {
                dev_err(dev, "Could not allocate VPD buffer\n");
                return -ENOMEM;
        }

        adapter->vpd->dma_addr =
                dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
                dev_err(dev, "Could not map VPD buffer\n");
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                return -ENOMEM;
        }

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        crq.get_vpd.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd.cmd = GET_VPD;
        crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
        crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        mutex_unlock(&adapter->fw_lock);
        return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int rc;

        rc = set_real_num_queues(netdev);
        if (rc)
                return rc;

        adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
        if (!adapter->vpd)
                return -ENOMEM;

        /* Vital Product Data (VPD) */
        rc = ibmvnic_get_vpd(adapter);
        if (rc) {
                netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
                return rc;
        }

        adapter->map_id = 1;

        rc = init_napi(adapter);
        if (rc)
                return rc;

        send_query_map(adapter);

        rc = init_rx_pools(netdev);
        if (rc)
                return rc;

        rc = init_tx_pools(netdev);
        return rc;
}

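/**
 * __ibmvnic_open - Bring the interface up
 * @netdev: net device to open
 *
 * Replenishes the rx pools, enables napi and the sub-CRQ interrupts,
 * raises the logical link state and starts the tx queues. enable_irq()
 * is called only when reopening from the VNIC_CLOSED state, where the
 * irqs were previously disabled.
 */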
static int __ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        enum vnic_state prev_state = adapter->state;
        int i, rc;

        adapter->state = VNIC_OPENING;
        replenish_pools(adapter);
        ibmvnic_napi_enable(adapter);

        /* We're ready to receive frames, enable the sub-crq interrupts and
         * set the logical link state to up
         */
        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->rx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->rx_scrq[i]);
        }

        for (i = 0; i < adapter->req_tx_queues; i++) {
                netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->tx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->tx_scrq[i]);
        }

        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        napi_disable(&adapter->napi[i]);
                release_resources(adapter);
                return rc;
        }

        netif_tx_start_all_queues(netdev);

        if (prev_state == VNIC_CLOSED) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        napi_schedule(&adapter->napi[i]);
        }

        adapter->state = VNIC_OPEN;
        return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        /* If device failover is pending, just set device state and return.
         * Device operation will be handled by reset routine.
         */
        if (adapter->failover_pending) {
                adapter->state = VNIC_OPEN;
                return 0;
        }

        if (adapter->state != VNIC_CLOSED) {
                rc = ibmvnic_login(netdev);
                if (rc)
                        goto out;

                rc = init_resources(adapter);
                if (rc) {
                        netdev_err(netdev, "failed to initialize resources\n");
                        release_resources(adapter);
                        goto out;
                }
        }

        rc = __ibmvnic_open(netdev);

out:
        /* If open fails due to a pending failover, set device state and
         * return. Device operation will be handled by reset routine.
         */
        if (rc && adapter->failover_pending) {
                adapter->state = VNIC_OPEN;
                rc = 0;
        }
        return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        struct ibmvnic_rx_buff *rx_buff;
        u64 rx_entries;
        int rx_scrqs;
        int i, j;

        if (!adapter->rx_pool)
                return;

        rx_scrqs = adapter->num_active_rx_pools;
        rx_entries = adapter->req_rx_add_entries_per_subcrq;

        /* Free any remaining skbs in the rx buffer pools */
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];
                if (!rx_pool || !rx_pool->rx_buff)
                        continue;

                netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
                for (j = 0; j < rx_entries; j++) {
                        rx_buff = &rx_pool->rx_buff[j];
                        if (rx_buff && rx_buff->skb) {
                                dev_kfree_skb_any(rx_buff->skb);
                                rx_buff->skb = NULL;
                        }
                }
        }
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_tx_pool *tx_pool)
{
        struct ibmvnic_tx_buff *tx_buff;
        u64 tx_entries;
        int i;

        if (!tx_pool || !tx_pool->tx_buff)
                return;

        tx_entries = tx_pool->num_buffers;

        for (i = 0; i < tx_entries; i++) {
                tx_buff = &tx_pool->tx_buff[i];
                if (tx_buff && tx_buff->skb) {
                        dev_kfree_skb_any(tx_buff->skb);
                        tx_buff->skb = NULL;
                }
        }
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
        int tx_scrqs;
        int i;

        if (!adapter->tx_pool || !adapter->tso_pool)
                return;

        tx_scrqs = adapter->num_active_tx_pools;

        /* Free any remaining skbs in the tx buffer pools */
        for (i = 0; i < tx_scrqs; i++) {
                netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
                clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
                clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i]->irq) {
                                netdev_dbg(netdev,
                                           "Disabling tx_scrq[%d] irq\n", i);
                                disable_scrq_irq(adapter, adapter->tx_scrq[i]);
                                disable_irq(adapter->tx_scrq[i]->irq);
                        }
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++) {
                        if (adapter->rx_scrq[i]->irq) {
                                netdev_dbg(netdev,
                                           "Disabling rx_scrq[%d] irq\n", i);
                                disable_scrq_irq(adapter, adapter->rx_scrq[i]);
                                disable_irq(adapter->rx_scrq[i]->irq);
                        }
                }
        }
}

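/**
 * ibmvnic_cleanup - Quiesce the device for close or reset
 * @netdev: net device being cleaned up
 *
 * Stops the tx queues, disables napi and the sub-CRQ interrupts, and
 * frees any skbs still held in the rx and tx pools.
 */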
static void ibmvnic_cleanup(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        /* ensure that transmissions are stopped if called by do_reset */
        if (test_bit(0, &adapter->resetting))
                netif_tx_disable(netdev);
        else
                netif_tx_stop_all_queues(netdev);

        ibmvnic_napi_disable(adapter);
        ibmvnic_disable_irqs(adapter);

        clean_rx_pools(adapter);
        clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        adapter->state = VNIC_CLOSING;
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
        if (rc)
                return rc;
        adapter->state = VNIC_CLOSED;
        return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        /* If device failover is pending, just set device state and return.
         * Device operation will be handled by reset routine.
         */
        if (adapter->failover_pending) {
                adapter->state = VNIC_CLOSED;
                return 0;
        }

        rc = __ibmvnic_close(netdev);
        ibmvnic_cleanup(netdev);

        return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to fill in
 * @hdr_data: buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and returns the total length of the data, to be used to
 * build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                          int *hdr_len, u8 *hdr_data)
{
        int len = 0;
        u8 *hdr;

        if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
                hdr_len[0] = sizeof(struct vlan_ethhdr);
        else
                hdr_len[0] = sizeof(struct ethhdr);

        if (skb->protocol == htons(ETH_P_IP)) {
                hdr_len[1] = ip_hdr(skb)->ihl * 4;
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                hdr_len[1] = sizeof(struct ipv6hdr);
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_ARP)) {
                hdr_len[1] = arp_hdr_len(skb->dev);
                hdr_len[2] = 0;
        }

        memset(hdr_data, 0, 120);
        if ((hdr_field >> 6) & 1) {
                hdr = skb_mac_header(skb);
                memcpy(hdr_data, hdr, hdr_len[0]);
                len += hdr_len[0];
        }

        if ((hdr_field >> 5) & 1) {
                hdr = skb_network_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[1]);
                len += hdr_len[1];
        }

        if ((hdr_field >> 4) & 1) {
                hdr = skb_transport_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[2]);
                len += hdr_len[2];
        }
        return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr. Returns the number of
 * descriptors created.
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                            union sub_crq *scrq_arr)
{
        union sub_crq hdr_desc;
        int tmp_len = len;
        int num_descs = 0;
        u8 *data, *cur;
        int tmp;

        while (tmp_len > 0) {
                cur = hdr_data + len - tmp_len;

                memset(&hdr_desc, 0, sizeof(hdr_desc));
                if (cur != hdr_data) {
                        data = hdr_desc.hdr_ext.data;
                        tmp = tmp_len > 29 ? 29 : tmp_len;
                        hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
                        hdr_desc.hdr_ext.len = tmp;
                } else {
                        data = hdr_desc.hdr.data;
                        tmp = tmp_len > 24 ? 24 : tmp_len;
                        hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
                        hdr_desc.hdr.len = tmp;
                        hdr_desc.hdr.l2_len = (u8)hdr_len[0];
                        hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
                        hdr_desc.hdr.l4_len = (u8)hdr_len[2];
                        hdr_desc.hdr.flag = hdr_field << 1;
                }
                memcpy(data, cur, tmp);
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
                num_descs++;
        }

        return num_descs;
}

1470 /**
1471  * build_hdr_descs_arr - build a header descriptor array
1472  * @txbuff: tx buffer holding the skb and the indirect descriptor array
1473  * @num_entries: number of descriptors to be sent
1474  * @hdr_field: bit field determining which headers will be sent
1475  *
1476  * This function will build a TX descriptor array with applicable
1477  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1478  */
1481 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1482                                 int *num_entries, u8 hdr_field)
1483 {
1484         int hdr_len[3] = {0, 0, 0};
1485         int tot_len;
1486         u8 *hdr_data = txbuff->hdr_data;
1487
1488         tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1489                                  txbuff->hdr_data);
1490         *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1491                          txbuff->indir_arr + 1);
1492 }
1493
1494 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1495                                     struct net_device *netdev)
1496 {
1497         /* For some backing devices, mishandling of small packets
1498          * can result in a loss of connection or TX stall. Device
1499          * architects recommend that no packet should be smaller
1500          * than the minimum MTU value provided to the driver, so
1501          * pad any packets to that length
1502          */
1503         if (skb->len < netdev->min_mtu)
1504                 return skb_put_padto(skb, netdev->min_mtu);
1505
1506         return 0;
1507 }
1508
1509 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1510 {
1511         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1512         int queue_num = skb_get_queue_mapping(skb);
1513         u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1514         struct device *dev = &adapter->vdev->dev;
1515         struct ibmvnic_tx_buff *tx_buff = NULL;
1516         struct ibmvnic_sub_crq_queue *tx_scrq;
1517         struct ibmvnic_tx_pool *tx_pool;
1518         unsigned int tx_send_failed = 0;
1519         unsigned int tx_map_failed = 0;
1520         unsigned int tx_dropped = 0;
1521         unsigned int tx_packets = 0;
1522         unsigned int tx_bytes = 0;
1523         dma_addr_t data_dma_addr;
1524         struct netdev_queue *txq;
1525         unsigned long lpar_rc;
1526         union sub_crq tx_crq;
1527         unsigned int offset;
1528         int num_entries = 1;
1529         unsigned char *dst;
1530         int index = 0;
1531         u8 proto = 0;
1532         u64 handle;
1533         netdev_tx_t ret = NETDEV_TX_OK;
1534
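        /* While a reset is in progress, drop the frame and quiesce this
         * queue; the queues are restarted once the reset completes.
         */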
1535         if (test_bit(0, &adapter->resetting)) {
1536                 if (!netif_subqueue_stopped(netdev, skb))
1537                         netif_stop_subqueue(netdev, queue_num);
1538                 dev_kfree_skb_any(skb);
1539
1540                 tx_send_failed++;
1541                 tx_dropped++;
1542                 ret = NETDEV_TX_OK;
1543                 goto out;
1544         }
1545
1546         if (ibmvnic_xmit_workarounds(skb, netdev)) {
1547                 tx_dropped++;
1548                 tx_send_failed++;
1549                 ret = NETDEV_TX_OK;
1550                 goto out;
1551         }
1552         if (skb_is_gso(skb))
1553                 tx_pool = &adapter->tso_pool[queue_num];
1554         else
1555                 tx_pool = &adapter->tx_pool[queue_num];
1556
1557         tx_scrq = adapter->tx_scrq[queue_num];
1558         txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1559         handle = tx_scrq->handle;
1560
1561         index = tx_pool->free_map[tx_pool->consumer_index];
1562
1563         if (index == IBMVNIC_INVALID_MAP) {
1564                 dev_kfree_skb_any(skb);
1565                 tx_send_failed++;
1566                 tx_dropped++;
1567                 ret = NETDEV_TX_OK;
1568                 goto out;
1569         }
1570
1571         tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1572
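        /* Each pool buffer is a fixed-size slot within one long-term
         * DMA-mapped region; derive the slot's CPU and bus addresses
         * from its index.
         */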
1573         offset = index * tx_pool->buf_size;
1574         dst = tx_pool->long_term_buff.buff + offset;
1575         memset(dst, 0, tx_pool->buf_size);
1576         data_dma_addr = tx_pool->long_term_buff.addr + offset;
1577
1578         if (skb_shinfo(skb)->nr_frags) {
1579                 int cur, i;
1580
1581                 /* Copy the head */
1582                 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1583                 cur = skb_headlen(skb);
1584
1585                 /* Copy the frags */
1586                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1587                         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1588
1589                         memcpy(dst + cur,
1590                                page_address(skb_frag_page(frag)) +
1591                                skb_frag_off(frag), skb_frag_size(frag));
1592                         cur += skb_frag_size(frag);
1593                 }
1594         } else {
1595                 skb_copy_from_linear_data(skb, dst, skb->len);
1596         }
1597
1598         tx_pool->consumer_index =
1599             (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1600
1601         tx_buff = &tx_pool->tx_buff[index];
1602         tx_buff->skb = skb;
1603         tx_buff->data_dma[0] = data_dma_addr;
1604         tx_buff->data_len[0] = skb->len;
1605         tx_buff->index = index;
1606         tx_buff->pool_index = queue_num;
1607         tx_buff->last_frag = true;
1608
1609         memset(&tx_crq, 0, sizeof(tx_crq));
1610         tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1611         tx_crq.v1.type = IBMVNIC_TX_DESC;
1612         tx_crq.v1.n_crq_elem = 1;
1613         tx_crq.v1.n_sge = 1;
1614         tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1615
1616         if (skb_is_gso(skb))
1617                 tx_crq.v1.correlator =
1618                         cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1619         else
1620                 tx_crq.v1.correlator = cpu_to_be32(index);
1621         tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1622         tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1623         tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1624
1625         if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1626                 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1627                 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1628         }
1629
1630         if (skb->protocol == htons(ETH_P_IP)) {
1631                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1632                 proto = ip_hdr(skb)->protocol;
1633         } else if (skb->protocol == htons(ETH_P_IPV6)) {
1634                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1635                 proto = ipv6_hdr(skb)->nexthdr;
1636         }
1637
1638         if (proto == IPPROTO_TCP)
1639                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1640         else if (proto == IPPROTO_UDP)
1641                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1642
1643         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1644                 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1645                 hdrs += 2;
1646         }
1647         if (skb_is_gso(skb)) {
1648                 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1649                 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1650                 hdrs += 2;
1651         }
1652         /* determine if l2/3/4 headers are sent to firmware */
1653         if ((*hdrs >> 7) & 1) {
1654                 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1655                 tx_crq.v1.n_crq_elem = num_entries;
1656                 tx_buff->num_entries = num_entries;
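                /* build_hdr_descs_arr() placed the header descriptors at
                 * indir_arr[1] onward; slot 0 holds the main TX descriptor.
                 */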
1657                 tx_buff->indir_arr[0] = tx_crq;
1658                 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1659                                                     sizeof(tx_buff->indir_arr),
1660                                                     DMA_TO_DEVICE);
1661                 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1662                         dev_kfree_skb_any(skb);
1663                         tx_buff->skb = NULL;
1664                         if (!firmware_has_feature(FW_FEATURE_CMO))
1665                                 dev_err(dev, "tx: unable to map descriptor array\n");
1666                         tx_map_failed++;
1667                         tx_dropped++;
1668                         ret = NETDEV_TX_OK;
1669                         goto tx_err_out;
1670                 }
1671                 lpar_rc = send_subcrq_indirect(adapter, handle,
1672                                                (u64)tx_buff->indir_dma,
1673                                                (u64)num_entries);
1674                 dma_unmap_single(dev, tx_buff->indir_dma,
1675                                  sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1676         } else {
1677                 tx_buff->num_entries = num_entries;
1678                 lpar_rc = send_subcrq(adapter, handle,
1679                                       &tx_crq);
1680         }
1681         if (lpar_rc != H_SUCCESS) {
1682                 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1683                         dev_err_ratelimited(dev, "tx: send failed\n");
1684                 dev_kfree_skb_any(skb);
1685                 tx_buff->skb = NULL;
1686
1687                 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1688                         /* Disable TX and report carrier off if queue is closed
1689                          * or pending failover.
1690                          * Firmware guarantees that a signal will be sent to the
1691                          * driver, triggering a reset or some other action.
1692                          */
1693                         netif_tx_stop_all_queues(netdev);
1694                         netif_carrier_off(netdev);
1695                 }
1696
1697                 tx_send_failed++;
1698                 tx_dropped++;
1699                 ret = NETDEV_TX_OK;
1700                 goto tx_err_out;
1701         }
1702
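        /* Apply backpressure: stop this subqueue once the number of
         * in-flight descriptors reaches the ring size; it is restarted
         * as TX completions drain the ring.
         */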
1703         if (atomic_add_return(num_entries, &tx_scrq->used)
1704                                         >= adapter->req_tx_entries_per_subcrq) {
1705                 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1706                 netif_stop_subqueue(netdev, queue_num);
1707         }
1708
1709         tx_packets++;
1710         tx_bytes += skb->len;
1711         txq->trans_start = jiffies;
1712         ret = NETDEV_TX_OK;
1713         goto out;
1714
1715 tx_err_out:
1716         /* roll back consumer index and map array */
1717         if (tx_pool->consumer_index == 0)
1718                 tx_pool->consumer_index =
1719                         tx_pool->num_buffers - 1;
1720         else
1721                 tx_pool->consumer_index--;
1722         tx_pool->free_map[tx_pool->consumer_index] = index;
1723 out:
1724         netdev->stats.tx_dropped += tx_dropped;
1725         netdev->stats.tx_bytes += tx_bytes;
1726         netdev->stats.tx_packets += tx_packets;
1727         adapter->tx_send_failed += tx_send_failed;
1728         adapter->tx_map_failed += tx_map_failed;
1729         adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1730         adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1731         adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1732
1733         return ret;
1734 }
1735
1736 static void ibmvnic_set_multi(struct net_device *netdev)
1737 {
1738         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1739         struct netdev_hw_addr *ha;
1740         union ibmvnic_crq crq;
1741
1742         memset(&crq, 0, sizeof(crq));
1743         crq.request_capability.first = IBMVNIC_CRQ_CMD;
1744         crq.request_capability.cmd = REQUEST_CAPABILITY;
1745
1746         if (netdev->flags & IFF_PROMISC) {
1747                 if (!adapter->promisc_supported)
1748                         return;
1749         } else {
1750                 if (netdev->flags & IFF_ALLMULTI) {
1751                         /* Accept all multicast */
1752                         memset(&crq, 0, sizeof(crq));
1753                         crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1754                         crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1755                         crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1756                         ibmvnic_send_crq(adapter, &crq);
1757                 } else if (netdev_mc_empty(netdev)) {
1758                         /* Reject all multicast */
1759                         memset(&crq, 0, sizeof(crq));
1760                         crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1761                         crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1762                         crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1763                         ibmvnic_send_crq(adapter, &crq);
1764                 } else {
1765                         /* Accept one or more multicast(s) */
1766                         netdev_for_each_mc_addr(ha, netdev) {
1767                                 memset(&crq, 0, sizeof(crq));
1768                                 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1769                                 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1770                                 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1771                                 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1772                                                 ha->addr);
1773                                 ibmvnic_send_crq(adapter, &crq);
1774                         }
1775                 }
1776         }
1777 }
1778
1779 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1780 {
1781         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1782         union ibmvnic_crq crq;
1783         int rc;
1784
1785         if (!is_valid_ether_addr(dev_addr)) {
1786                 rc = -EADDRNOTAVAIL;
1787                 goto err;
1788         }
1789
1790         memset(&crq, 0, sizeof(crq));
1791         crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1792         crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1793         ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1794
1795         mutex_lock(&adapter->fw_lock);
1796         adapter->fw_done_rc = 0;
1797         reinit_completion(&adapter->fw_done);
1798
1799         rc = ibmvnic_send_crq(adapter, &crq);
1800         if (rc) {
1801                 rc = -EIO;
1802                 mutex_unlock(&adapter->fw_lock);
1803                 goto err;
1804         }
1805
1806         rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1807         /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1808         if (rc || adapter->fw_done_rc) {
1809                 rc = -EIO;
1810                 mutex_unlock(&adapter->fw_lock);
1811                 goto err;
1812         }
1813         mutex_unlock(&adapter->fw_lock);
1814         return 0;
1815 err:
1816         ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1817         return rc;
1818 }
1819
1820 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1821 {
1822         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1823         struct sockaddr *addr = p;
1824         int rc;
1825
1826         rc = 0;
1827         if (!is_valid_ether_addr(addr->sa_data))
1828                 return -EADDRNOTAVAIL;
1829
1830         if (adapter->state != VNIC_PROBED) {
1831                 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1832                 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1833         }
1834
1835         return rc;
1836 }
1837
1838 /*
1839  * do_change_param_reset returns zero if we are able to keep processing reset
1840  * events, or non-zero if we hit a fatal error and must halt.
1841  */
1842 static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1843                                  struct ibmvnic_rwi *rwi,
1844                                  u32 reset_state)
1845 {
1846         struct net_device *netdev = adapter->netdev;
1847         int i, rc;
1848
1849         netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1850                    rwi->reset_reason);
1851
1852         netif_carrier_off(netdev);
1853         adapter->reset_reason = rwi->reset_reason;
1854
1855         ibmvnic_cleanup(netdev);
1856
1857         if (reset_state == VNIC_OPEN) {
1858                 rc = __ibmvnic_close(netdev);
1859                 if (rc)
1860                         return rc;
1861         }
1862
1863         release_resources(adapter);
1864         release_sub_crqs(adapter, 1);
1865         release_crq_queue(adapter);
1866
1867         adapter->state = VNIC_PROBED;
1868
1869         rc = init_crq_queue(adapter);
1870
1871         if (rc) {
1872                 netdev_err(adapter->netdev,
1873                            "Couldn't initialize crq. rc=%d\n", rc);
1874                 return rc;
1875         }
1876
1877         rc = ibmvnic_reset_init(adapter, true);
1878         if (rc)
1879                 return IBMVNIC_INIT_FAILED;
1880
1881         /* If the adapter was in PROBE state prior to the reset,
1882          * exit here.
1883          */
1884         if (reset_state == VNIC_PROBED)
1885                 return 0;
1886
1887         rc = ibmvnic_login(netdev);
1888         if (rc) {
1889                 adapter->state = reset_state;
1890                 return rc;
1891         }
1892
1893         rc = init_resources(adapter);
1894         if (rc)
1895                 return rc;
1896
1897         ibmvnic_disable_irqs(adapter);
1898
1899         adapter->state = VNIC_CLOSED;
1900
1901         if (reset_state == VNIC_CLOSED)
1902                 return 0;
1903
1904         rc = __ibmvnic_open(netdev);
1905         if (rc)
1906                 return IBMVNIC_OPEN_FAILED;
1907
1908         /* refresh device's multicast list */
1909         ibmvnic_set_multi(netdev);
1910
1911         /* kick napi */
1912         for (i = 0; i < adapter->req_rx_queues; i++)
1913                 napi_schedule(&adapter->napi[i]);
1914
1915         return 0;
1916 }
1917
1918 /*
1919  * do_reset returns zero if we are able to keep processing reset events, or
1920  * non-zero if we hit a fatal error and must halt.
1921  */
1922 static int do_reset(struct ibmvnic_adapter *adapter,
1923                     struct ibmvnic_rwi *rwi, u32 reset_state)
1924 {
1925         u64 old_num_rx_queues, old_num_tx_queues;
1926         u64 old_num_rx_slots, old_num_tx_slots;
1927         struct net_device *netdev = adapter->netdev;
1928         int i, rc;
1929
1930         netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1931                    rwi->reset_reason);
1932
1933         rtnl_lock();
1934         /*
1935          * Now that we have the rtnl lock, clear any pending failover.
1936          * This will ensure ibmvnic_open() has either completed or will
1937          * block until failover is complete.
1938          */
1939         if (rwi->reset_reason == VNIC_RESET_FAILOVER)
1940                 adapter->failover_pending = false;
1941
1942         netif_carrier_off(netdev);
1943         adapter->reset_reason = rwi->reset_reason;
1944
1945         old_num_rx_queues = adapter->req_rx_queues;
1946         old_num_tx_queues = adapter->req_tx_queues;
1947         old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1948         old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1949
1950         ibmvnic_cleanup(netdev);
1951
1952         if (reset_state == VNIC_OPEN &&
1953             adapter->reset_reason != VNIC_RESET_MOBILITY &&
1954             adapter->reset_reason != VNIC_RESET_FAILOVER) {
1955                 adapter->state = VNIC_CLOSING;
1956
1957                 /* Release the RTNL lock before link state change and
1958                  * re-acquire after the link state change to allow
1959                  * linkwatch_event to grab the RTNL lock and run during
1960                  * a reset.
1961                  */
1962                 rtnl_unlock();
1963                 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1964                 rtnl_lock();
1965                 if (rc)
1966                         goto out;
1967
1968                 if (adapter->state != VNIC_CLOSING) {
1969                         rc = -1;
1970                         goto out;
1971                 }
1972
1973                 adapter->state = VNIC_CLOSED;
1974         }
1975
1976         if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1977                 /* remove the closed state so when we call open it appears
1978                  * we are coming from the probed state.
1979                  */
1980                 adapter->state = VNIC_PROBED;
1981
1982                 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1983                         rc = ibmvnic_reenable_crq_queue(adapter);
1984                         release_sub_crqs(adapter, 1);
1985                 } else {
1986                         rc = ibmvnic_reset_crq(adapter);
1987                         if (rc == H_CLOSED || rc == H_SUCCESS) {
1988                                 rc = vio_enable_interrupts(adapter->vdev);
1989                                 if (rc)
1990                                         netdev_err(adapter->netdev,
1991                                                    "Reset failed to enable interrupts. rc=%d\n",
1992                                                    rc);
1993                         }
1994                 }
1995
1996                 if (rc) {
1997                         netdev_err(adapter->netdev,
1998                                    "Reset couldn't initialize crq. rc=%d\n", rc);
1999                         goto out;
2000                 }
2001
2002                 rc = ibmvnic_reset_init(adapter, true);
2003                 if (rc) {
2004                         rc = IBMVNIC_INIT_FAILED;
2005                         goto out;
2006                 }
2007
2008                 /* If the adapter was in PROBE state prior to the reset,
2009                  * exit here.
2010                  */
2011                 if (reset_state == VNIC_PROBED) {
2012                         rc = 0;
2013                         goto out;
2014                 }
2015
2016                 rc = ibmvnic_login(netdev);
2017                 if (rc) {
2018                         adapter->state = reset_state;
2019                         goto out;
2020                 }
2021
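                /* If the queue or ring geometry granted by the server has
                 * changed, or the pools were never allocated, rebuild all
                 * resources from scratch; otherwise just reset the
                 * existing pools in place.
                 */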
2022                 if (adapter->req_rx_queues != old_num_rx_queues ||
2023                     adapter->req_tx_queues != old_num_tx_queues ||
2024                     adapter->req_rx_add_entries_per_subcrq !=
2025                     old_num_rx_slots ||
2026                     adapter->req_tx_entries_per_subcrq !=
2027                     old_num_tx_slots ||
2028                     !adapter->rx_pool ||
2029                     !adapter->tso_pool ||
2030                     !adapter->tx_pool) {
2031                         release_rx_pools(adapter);
2032                         release_tx_pools(adapter);
2033                         release_napi(adapter);
2034                         release_vpd_data(adapter);
2035
2036                         rc = init_resources(adapter);
2037                         if (rc)
2038                                 goto out;
2039
2040                 } else {
2041                         rc = reset_tx_pools(adapter);
2042                         if (rc) {
2043                                 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2044                                                 rc);
2045                                 goto out;
2046                         }
2047
2048                         rc = reset_rx_pools(adapter);
2049                         if (rc) {
2050                                 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2051                                                 rc);
2052                                 goto out;
2053                         }
2054                 }
2055                 ibmvnic_disable_irqs(adapter);
2056         }
2057         adapter->state = VNIC_CLOSED;
2058
2059         if (reset_state == VNIC_CLOSED) {
2060                 rc = 0;
2061                 goto out;
2062         }
2063
2064         rc = __ibmvnic_open(netdev);
2065         if (rc) {
2066                 rc = IBMVNIC_OPEN_FAILED;
2067                 goto out;
2068         }
2069
2070         /* refresh device's multicast list */
2071         ibmvnic_set_multi(netdev);
2072
2073         /* kick napi */
2074         for (i = 0; i < adapter->req_rx_queues; i++)
2075                 napi_schedule(&adapter->napi[i]);
2076
2077         if (adapter->reset_reason != VNIC_RESET_FAILOVER)
2078                 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
2079
2080         rc = 0;
2081
2082 out:
2083         rtnl_unlock();
2084
2085         return rc;
2086 }
2087
2088 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2089                          struct ibmvnic_rwi *rwi, u32 reset_state)
2090 {
2091         struct net_device *netdev = adapter->netdev;
2092         int rc;
2093
2094         netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2095                    rwi->reset_reason);
2096
2097         netif_carrier_off(netdev);
2098         adapter->reset_reason = rwi->reset_reason;
2099
2100         ibmvnic_cleanup(netdev);
2101         release_resources(adapter);
2102         release_sub_crqs(adapter, 0);
2103         release_crq_queue(adapter);
2104
2105         /* remove the closed state so when we call open it appears
2106          * we are coming from the probed state.
2107          */
2108         adapter->state = VNIC_PROBED;
2109
2110         reinit_completion(&adapter->init_done);
2111         rc = init_crq_queue(adapter);
2112         if (rc) {
2113                 netdev_err(adapter->netdev,
2114                            "Couldn't initialize crq. rc=%d\n", rc);
2115                 return rc;
2116         }
2117
2118         rc = ibmvnic_reset_init(adapter, false);
2119         if (rc)
2120                 return rc;
2121
2122         /* If the adapter was in PROBE state prior to the reset,
2123          * exit here.
2124          */
2125         if (reset_state == VNIC_PROBED)
2126                 return 0;
2127
2128         rc = ibmvnic_login(netdev);
2129         if (rc) {
2130                 adapter->state = VNIC_PROBED;
2131                 return 0;
2132         }
2133
2134         rc = init_resources(adapter);
2135         if (rc)
2136                 return rc;
2137
2138         ibmvnic_disable_irqs(adapter);
2139         adapter->state = VNIC_CLOSED;
2140
2141         if (reset_state == VNIC_CLOSED)
2142                 return 0;
2143
2144         rc = __ibmvnic_open(netdev);
2145         if (rc)
2146                 return IBMVNIC_OPEN_FAILED;
2147
2148         return 0;
2149 }
2150
2151 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2152 {
2153         struct ibmvnic_rwi *rwi;
2154         unsigned long flags;
2155
2156         spin_lock_irqsave(&adapter->rwi_lock, flags);
2157
2158         if (!list_empty(&adapter->rwi_list)) {
2159                 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2160                                        list);
2161                 list_del(&rwi->list);
2162         } else {
2163                 rwi = NULL;
2164         }
2165
2166         spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2167         return rwi;
2168 }
2169
2170 static void free_all_rwi(struct ibmvnic_adapter *adapter)
2171 {
2172         struct ibmvnic_rwi *rwi;
2173
2174         rwi = get_next_rwi(adapter);
2175         while (rwi) {
2176                 kfree(rwi);
2177                 rwi = get_next_rwi(adapter);
2178         }
2179 }
2180
2181 static void __ibmvnic_reset(struct work_struct *work)
2182 {
2183         struct ibmvnic_rwi *rwi;
2184         struct ibmvnic_adapter *adapter;
2185         bool saved_state = false;
2186         unsigned long flags;
2187         u32 reset_state;
2188         int rc = 0;
2189
2190         adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2191
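        /* Only one reset worker may run at a time; if one is already
         * running, retry later from the delayed-work path instead.
         */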
2192         if (test_and_set_bit_lock(0, &adapter->resetting)) {
2193                 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2194                                       IBMVNIC_RESET_DELAY);
2195                 return;
2196         }
2197
2198         rwi = get_next_rwi(adapter);
2199         while (rwi) {
2200                 spin_lock_irqsave(&adapter->state_lock, flags);
2201
2202                 if (adapter->state == VNIC_REMOVING ||
2203                     adapter->state == VNIC_REMOVED) {
2204                         spin_unlock_irqrestore(&adapter->state_lock, flags);
2205                         kfree(rwi);
2206                         rc = EBUSY;
2207                         break;
2208                 }
2209
2210                 if (!saved_state) {
2211                         reset_state = adapter->state;
2212                         adapter->state = VNIC_RESETTING;
2213                         saved_state = true;
2214                 }
2215                 spin_unlock_irqrestore(&adapter->state_lock, flags);
2216
2217                 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2218                         /* CHANGE_PARAM requestor holds rtnl_lock */
2219                         rc = do_change_param_reset(adapter, rwi, reset_state);
2220                 } else if (adapter->force_reset_recovery) {
2221                         /*
2222                          * Since we are doing a hard reset now, clear the
2223                          * failover_pending flag so we don't ignore any
2224                          * future MOBILITY or other resets.
2225                          */
2226                         adapter->failover_pending = false;
2227
2228                         /* Transport event occurred during previous reset */
2229                         if (adapter->wait_for_reset) {
2230                                 /* Previous was CHANGE_PARAM; caller locked */
2231                                 adapter->force_reset_recovery = false;
2232                                 rc = do_hard_reset(adapter, rwi, reset_state);
2233                         } else {
2234                                 rtnl_lock();
2235                                 adapter->force_reset_recovery = false;
2236                                 rc = do_hard_reset(adapter, rwi, reset_state);
2237                                 rtnl_unlock();
2238                         }
2239                 } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2240                                 adapter->from_passive_init)) {
2241                         rc = do_reset(adapter, rwi, reset_state);
2242                 }
2243                 kfree(rwi);
2244                 if (rc == IBMVNIC_OPEN_FAILED) {
2245                         if (list_empty(&adapter->rwi_list))
2246                                 adapter->state = VNIC_CLOSED;
2247                         else
2248                                 adapter->state = reset_state;
2249                         rc = 0;
2250                 } else if (rc && rc != IBMVNIC_INIT_FAILED &&
2251                     !adapter->force_reset_recovery)
2252                         break;
2253
2254                 rwi = get_next_rwi(adapter);
2255
2256                 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2257                             rwi->reset_reason == VNIC_RESET_MOBILITY))
2258                         adapter->force_reset_recovery = true;
2259         }
2260
2261         if (adapter->wait_for_reset) {
2262                 adapter->reset_done_rc = rc;
2263                 complete(&adapter->reset_done);
2264         }
2265
2266         if (rc) {
2267                 netdev_dbg(adapter->netdev, "Reset failed\n");
2268                 free_all_rwi(adapter);
2269         }
2270
2271         clear_bit_unlock(0, &adapter->resetting);
2272 }
2273
2274 static void __ibmvnic_delayed_reset(struct work_struct *work)
2275 {
2276         struct ibmvnic_adapter *adapter;
2277
2278         adapter = container_of(work, struct ibmvnic_adapter,
2279                                ibmvnic_delayed_reset.work);
2280         __ibmvnic_reset(&adapter->ibmvnic_reset);
2281 }
2282
2283 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2284                          enum ibmvnic_reset_reason reason)
2285 {
2286         struct list_head *entry, *tmp_entry;
2287         struct ibmvnic_rwi *rwi, *tmp;
2288         struct net_device *netdev = adapter->netdev;
2289         unsigned long flags;
2290         int ret;
2291
2292         /*
2293          * If failover is pending don't schedule any other reset.
2294          * Instead let the failover complete. If there is already
2295          * a failover reset scheduled, we will detect and drop the
2296          * duplicate reset when walking the ->rwi_list below.
2297          */
2298         if (adapter->state == VNIC_REMOVING ||
2299             adapter->state == VNIC_REMOVED ||
2300             (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2301                 ret = EBUSY;
2302                 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2303                 goto err;
2304         }
2305
2306         if (adapter->state == VNIC_PROBING) {
2307                 netdev_warn(netdev, "Adapter reset during probe\n");
2308                 ret = adapter->init_done_rc = EAGAIN;
2309                 goto err;
2310         }
2311
2312         spin_lock_irqsave(&adapter->rwi_lock, flags);
2313
2314         list_for_each(entry, &adapter->rwi_list) {
2315                 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2316                 if (tmp->reset_reason == reason) {
2317                         netdev_dbg(netdev, "Skipping matching reset\n");
2318                         spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2319                         ret = EBUSY;
2320                         goto err;
2321                 }
2322         }
2323
2324         rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2325         if (!rwi) {
2326                 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2327                 ibmvnic_close(netdev);
2328                 ret = ENOMEM;
2329                 goto err;
2330         }
2331         /* if we just received a transport event,
2332          * flush reset queue and process this reset
2333          */
2334         if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2335                 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2336                         list_del(entry);
2337         }
2338         rwi->reset_reason = reason;
2339         list_add_tail(&rwi->list, &adapter->rwi_list);
2340         spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2341         netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2342         schedule_work(&adapter->ibmvnic_reset);
2343
2344         return 0;
2345 err:
2346         return -ret;
2347 }
2348
2349 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2350 {
2351         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2352
2353         ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2354 }
2355
2356 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2357                                   struct ibmvnic_rx_buff *rx_buff)
2358 {
2359         struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2360
2361         rx_buff->skb = NULL;
2362
2363         pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2364         pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2365
2366         atomic_dec(&pool->available);
2367 }
2368
2369 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2370 {
2371         struct net_device *netdev = napi->dev;
2372         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2373         int scrq_num = (int)(napi - adapter->napi);
2374         int frames_processed = 0;
2375
2376 restart_poll:
2377         while (frames_processed < budget) {
2378                 struct sk_buff *skb;
2379                 struct ibmvnic_rx_buff *rx_buff;
2380                 union sub_crq *next;
2381                 u32 length;
2382                 u16 offset;
2383                 u8 flags = 0;
2384
2385                 if (unlikely(test_bit(0, &adapter->resetting) &&
2386                              adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2387                         enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2388                         napi_complete_done(napi, frames_processed);
2389                         return frames_processed;
2390                 }
2391
2392                 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2393                         break;
2394                 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
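                /* The correlator echoes back the address of the rx_buff
                 * that was supplied when this buffer was posted.
                 */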
2395                 rx_buff = (struct ibmvnic_rx_buff *)
2396                           be64_to_cpu(next->rx_comp.correlator);
2398                 /* do error checking */
2399                 if (next->rx_comp.rc) {
2400                         netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2401                                    be16_to_cpu(next->rx_comp.rc));
2402                         /* free the entry */
2403                         next->rx_comp.first = 0;
2404                         dev_kfree_skb_any(rx_buff->skb);
2405                         remove_buff_from_pool(adapter, rx_buff);
2406                         continue;
2407                 } else if (!rx_buff->skb) {
2408                         /* free the entry */
2409                         next->rx_comp.first = 0;
2410                         remove_buff_from_pool(adapter, rx_buff);
2411                         continue;
2412                 }
2413
2414                 length = be32_to_cpu(next->rx_comp.len);
2415                 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2416                 flags = next->rx_comp.flags;
2417                 skb = rx_buff->skb;
2418                 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2419                                         length);
2420
2421                 /* VLAN Header has been stripped by the system firmware and
2422                  * needs to be inserted by the driver
2423                  */
2424                 if (adapter->rx_vlan_header_insertion &&
2425                     (flags & IBMVNIC_VLAN_STRIPPED))
2426                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2427                                                ntohs(next->rx_comp.vlan_tci));
2428
2429                 /* free the entry */
2430                 next->rx_comp.first = 0;
2431                 remove_buff_from_pool(adapter, rx_buff);
2432
2433                 skb_put(skb, length);
2434                 skb->protocol = eth_type_trans(skb, netdev);
2435                 skb_record_rx_queue(skb, scrq_num);
2436
2437                 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2438                     flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2439                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2440                 }
2441
2442                 length = skb->len;
2443                 napi_gro_receive(napi, skb); /* send it up */
2444                 netdev->stats.rx_packets++;
2445                 netdev->stats.rx_bytes += length;
2446                 adapter->rx_stats_buffers[scrq_num].packets++;
2447                 adapter->rx_stats_buffers[scrq_num].bytes += length;
2448                 frames_processed++;
2449         }
2450
2451         if (adapter->state != VNIC_CLOSING)
2452                 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2453
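        /* Budget not exhausted: re-enable the queue interrupt and finish
         * NAPI, but if completions raced in meanwhile, disable the IRQ
         * again and keep polling.
         */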
2454         if (frames_processed < budget) {
2455                 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2456                 napi_complete_done(napi, frames_processed);
2457                 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2458                     napi_reschedule(napi)) {
2459                         disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2460                         goto restart_poll;
2461                 }
2462         }
2463         return frames_processed;
2464 }
2465
2466 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2467 {
2468         int rc, ret;
2469
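        /* Save the currently granted settings so they can be restored
         * if the CHANGE_PARAM reset with the desired values fails.
         */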
2470         adapter->fallback.mtu = adapter->req_mtu;
2471         adapter->fallback.rx_queues = adapter->req_rx_queues;
2472         adapter->fallback.tx_queues = adapter->req_tx_queues;
2473         adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2474         adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2475
2476         reinit_completion(&adapter->reset_done);
2477         adapter->wait_for_reset = true;
2478         rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2479
2480         if (rc) {
2481                 ret = rc;
2482                 goto out;
2483         }
2484         rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2485         if (rc) {
2486                 ret = -ENODEV;
2487                 goto out;
2488         }
2489
2490         ret = 0;
2491         if (adapter->reset_done_rc) {
2492                 ret = -EIO;
2493                 adapter->desired.mtu = adapter->fallback.mtu;
2494                 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2495                 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2496                 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2497                 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2498
2499                 reinit_completion(&adapter->reset_done);
2500                 adapter->wait_for_reset = true;
2501                 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2502                 if (rc) {
2503                         ret = rc;
2504                         goto out;
2505                 }
2506                 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2507                                                  60000);
2508                 if (rc) {
2509                         ret = -ENODEV;
2510                         goto out;
2511                 }
2512         }
2513 out:
2514         adapter->wait_for_reset = false;
2515
2516         return ret;
2517 }
2518
2519 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2520 {
2521         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2522
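        /* The adapter's MTU value includes the Ethernet header, so add
         * it to the requested L3 MTU before triggering a CHANGE_PARAM
         * reset.
         */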
2523         adapter->desired.mtu = new_mtu + ETH_HLEN;
2524
2525         return wait_for_reset(adapter);
2526 }
2527
2528 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2529                                                 struct net_device *dev,
2530                                                 netdev_features_t features)
2531 {
2532         /* Some backing hardware adapters cannot
2533          * handle packets with an MSS smaller than 224
2534          * or with only one segment.
2535          */
2536         if (skb_is_gso(skb)) {
2537                 if (skb_shinfo(skb)->gso_size < 224 ||
2538                     skb_shinfo(skb)->gso_segs == 1)
2539                         features &= ~NETIF_F_GSO_MASK;
2540         }
2541
2542         return features;
2543 }
2544
2545 static const struct net_device_ops ibmvnic_netdev_ops = {
2546         .ndo_open               = ibmvnic_open,
2547         .ndo_stop               = ibmvnic_close,
2548         .ndo_start_xmit         = ibmvnic_xmit,
2549         .ndo_set_rx_mode        = ibmvnic_set_multi,
2550         .ndo_set_mac_address    = ibmvnic_set_mac,
2551         .ndo_validate_addr      = eth_validate_addr,
2552         .ndo_tx_timeout         = ibmvnic_tx_timeout,
2553         .ndo_change_mtu         = ibmvnic_change_mtu,
2554         .ndo_features_check     = ibmvnic_features_check,
2555 };
2556
2557 /* ethtool functions */
2558
2559 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2560                                       struct ethtool_link_ksettings *cmd)
2561 {
2562         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2563         int rc;
2564
2565         rc = send_query_phys_parms(adapter);
2566         if (rc) {
2567                 adapter->speed = SPEED_UNKNOWN;
2568                 adapter->duplex = DUPLEX_UNKNOWN;
2569         }
2570         cmd->base.speed = adapter->speed;
2571         cmd->base.duplex = adapter->duplex;
2572         cmd->base.port = PORT_FIBRE;
2573         cmd->base.phy_address = 0;
2574         cmd->base.autoneg = AUTONEG_ENABLE;
2575
2576         return 0;
2577 }
2578
2579 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2580                                 struct ethtool_drvinfo *info)
2581 {
2582         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2583
2584         strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2585         strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2586         strlcpy(info->fw_version, adapter->fw_version,
2587                 sizeof(info->fw_version));
2588 }
2589
2590 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2591 {
2592         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2593
2594         return adapter->msg_enable;
2595 }
2596
2597 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2598 {
2599         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2600
2601         adapter->msg_enable = data;
2602 }
2603
2604 static u32 ibmvnic_get_link(struct net_device *netdev)
2605 {
2606         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2607
2608         /* Don't need to send a query because we request a logical link up at
2609          * init and then we wait for link state indications
2610          */
2611         return adapter->logical_link_state;
2612 }
2613
2614 static void ibmvnic_get_ringparam(struct net_device *netdev,
2615                                   struct ethtool_ringparam *ring)
2616 {
2617         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2618
2619         if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2620                 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2621                 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2622         } else {
2623                 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2624                 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2625         }
2626         ring->rx_mini_max_pending = 0;
2627         ring->rx_jumbo_max_pending = 0;
2628         ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2629         ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2630         ring->rx_mini_pending = 0;
2631         ring->rx_jumbo_pending = 0;
2632 }
2633
2634 static int ibmvnic_set_ringparam(struct net_device *netdev,
2635                                  struct ethtool_ringparam *ring)
2636 {
2637         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2638         int ret;
2639
2640         ret = 0;
2641         adapter->desired.rx_entries = ring->rx_pending;
2642         adapter->desired.tx_entries = ring->tx_pending;
2643
2644         ret = wait_for_reset(adapter);
2645
2646         if (!ret &&
2647             (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2648              adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2649                 netdev_info(netdev,
2650                             "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2651                             ring->rx_pending, ring->tx_pending,
2652                             adapter->req_rx_add_entries_per_subcrq,
2653                             adapter->req_tx_entries_per_subcrq);
2654         return ret;
2655 }
2656
2657 static void ibmvnic_get_channels(struct net_device *netdev,
2658                                  struct ethtool_channels *channels)
2659 {
2660         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2661
2662         if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2663                 channels->max_rx = adapter->max_rx_queues;
2664                 channels->max_tx = adapter->max_tx_queues;
2665         } else {
2666                 channels->max_rx = IBMVNIC_MAX_QUEUES;
2667                 channels->max_tx = IBMVNIC_MAX_QUEUES;
2668         }
2669
2670         channels->max_other = 0;
2671         channels->max_combined = 0;
2672         channels->rx_count = adapter->req_rx_queues;
2673         channels->tx_count = adapter->req_tx_queues;
2674         channels->other_count = 0;
2675         channels->combined_count = 0;
2676 }
2677
2678 static int ibmvnic_set_channels(struct net_device *netdev,
2679                                 struct ethtool_channels *channels)
2680 {
2681         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2682         int ret;
2683
2684         ret = 0;
2685         adapter->desired.rx_queues = channels->rx_count;
2686         adapter->desired.tx_queues = channels->tx_count;
2687
2688         ret = wait_for_reset(adapter);
2689
2690         if (!ret &&
2691             (adapter->req_rx_queues != channels->rx_count ||
2692              adapter->req_tx_queues != channels->tx_count))
2693                 netdev_info(netdev,
2694                             "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2695                             channels->rx_count, channels->tx_count,
2696                             adapter->req_rx_queues, adapter->req_tx_queues);
2697         return ret;
2699 }
2700
2701 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2702 {
2703         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2704         int i;
2705
2706         switch (stringset) {
2707         case ETH_SS_STATS:
2708                 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2709                                 i++, data += ETH_GSTRING_LEN)
2710                         memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2711
2712                 for (i = 0; i < adapter->req_tx_queues; i++) {
2713                         snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2714                         data += ETH_GSTRING_LEN;
2715
2716                         snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2717                         data += ETH_GSTRING_LEN;
2718
2719                         snprintf(data, ETH_GSTRING_LEN,
2720                                  "tx%d_dropped_packets", i);
2721                         data += ETH_GSTRING_LEN;
2722                 }
2723
2724                 for (i = 0; i < adapter->req_rx_queues; i++) {
2725                         snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2726                         data += ETH_GSTRING_LEN;
2727
2728                         snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2729                         data += ETH_GSTRING_LEN;
2730
2731                         snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2732                         data += ETH_GSTRING_LEN;
2733                 }
2734                 break;
2735
2736         case ETH_SS_PRIV_FLAGS:
2737                 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2738                         strcpy(data + i * ETH_GSTRING_LEN,
2739                                ibmvnic_priv_flags[i]);
2740                 break;
2741         default:
2742                 return;
2743         }
2744 }
2745
2746 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2747 {
2748         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2749
2750         switch (sset) {
2751         case ETH_SS_STATS:
2752                 return ARRAY_SIZE(ibmvnic_stats) +
2753                        adapter->req_tx_queues * NUM_TX_STATS +
2754                        adapter->req_rx_queues * NUM_RX_STATS;
2755         case ETH_SS_PRIV_FLAGS:
2756                 return ARRAY_SIZE(ibmvnic_priv_flags);
2757         default:
2758                 return -EOPNOTSUPP;
2759         }
2760 }
2761
2762 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2763                                       struct ethtool_stats *stats, u64 *data)
2764 {
2765         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2766         union ibmvnic_crq crq;
2767         int i, j;
2768         int rc;
2769
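        /* Ask the VNIC server to DMA the statistics block into the
         * buffer mapped at stats_token, then wait for the CRQ response
         * to signal completion.
         */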
2770         memset(&crq, 0, sizeof(crq));
2771         crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2772         crq.request_statistics.cmd = REQUEST_STATISTICS;
2773         crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2774         crq.request_statistics.len =
2775             cpu_to_be32(sizeof(struct ibmvnic_statistics));
2776
2777         /* Wait for data to be written */
2778         reinit_completion(&adapter->stats_done);
2779         rc = ibmvnic_send_crq(adapter, &crq);
2780         if (rc)
2781                 return;
2782         rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2783         if (rc)
2784                 return;
2785
2786         for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2787                 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2788                                                 ibmvnic_stats[i].offset));
2789
2790         for (j = 0; j < adapter->req_tx_queues; j++) {
2791                 data[i] = adapter->tx_stats_buffers[j].packets;
2792                 i++;
2793                 data[i] = adapter->tx_stats_buffers[j].bytes;
2794                 i++;
2795                 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2796                 i++;
2797         }
2798
2799         for (j = 0; j < adapter->req_rx_queues; j++) {
2800                 data[i] = adapter->rx_stats_buffers[j].packets;
2801                 i++;
2802                 data[i] = adapter->rx_stats_buffers[j].bytes;
2803                 i++;
2804                 data[i] = adapter->rx_stats_buffers[j].interrupts;
2805                 i++;
2806         }
2807 }
2808
2809 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2810 {
2811         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2812
2813         return adapter->priv_flags;
2814 }
2815
2816 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2817 {
2818         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2819         bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2820
2821         if (which_maxes)
2822                 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2823         else
2824                 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2825
2826         return 0;
2827 }

2828 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2829         .get_drvinfo            = ibmvnic_get_drvinfo,
2830         .get_msglevel           = ibmvnic_get_msglevel,
2831         .set_msglevel           = ibmvnic_set_msglevel,
2832         .get_link               = ibmvnic_get_link,
2833         .get_ringparam          = ibmvnic_get_ringparam,
2834         .set_ringparam          = ibmvnic_set_ringparam,
2835         .get_channels           = ibmvnic_get_channels,
2836         .set_channels           = ibmvnic_set_channels,
2837         .get_strings            = ibmvnic_get_strings,
2838         .get_sset_count         = ibmvnic_get_sset_count,
2839         .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
2840         .get_link_ksettings     = ibmvnic_get_link_ksettings,
2841         .get_priv_flags         = ibmvnic_get_priv_flags,
2842         .set_priv_flags         = ibmvnic_set_priv_flags,
2843 };
2844
2845 /* Routines for managing CRQs/sCRQs  */
2846
2847 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2848                                    struct ibmvnic_sub_crq_queue *scrq)
2849 {
2850         int rc;
2851
2852         if (scrq->irq) {
2853                 free_irq(scrq->irq, scrq);
2854                 irq_dispose_mapping(scrq->irq);
2855                 scrq->irq = 0;
2856         }
2857
2858         memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2859         atomic_set(&scrq->used, 0);
2860         scrq->cur = 0;
2861
2862         rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2863                            4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2864         return rc;
2865 }
2866
2867 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2868 {
2869         int i, rc;
2870
2871         for (i = 0; i < adapter->req_tx_queues; i++) {
2872                 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2873                 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2874                 if (rc)
2875                         return rc;
2876         }
2877
2878         for (i = 0; i < adapter->req_rx_queues; i++) {
2879                 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2880                 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2881                 if (rc)
2882                         return rc;
2883         }
2884
2885         return 0;
2886 }
2887
2888 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2889                                   struct ibmvnic_sub_crq_queue *scrq,
2890                                   bool do_h_free)
2891 {
2892         struct device *dev = &adapter->vdev->dev;
2893         long rc;
2894
2895         netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2896
2897         if (do_h_free) {
2898                 /* Close the sub-CRQ; retry while the hypervisor is busy */
2899                 do {
2900                         rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2901                                                 adapter->vdev->unit_address,
2902                                                 scrq->crq_num);
2903                 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2904
2905                 if (rc) {
2906                         netdev_err(adapter->netdev,
2907                                    "Failed to release sub-CRQ %16lx, rc = %ld\n",
2908                                    scrq->crq_num, rc);
2909                 }
2910         }
2911
2912         dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2913                          DMA_BIDIRECTIONAL);
2914         free_pages((unsigned long)scrq->msgs, 2);
2915         kfree(scrq);
2916 }
2917
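/*
 * Descriptive note (added annotation): allocate and register one sub-CRQ.
 * The message ring is an order-2 (4-page) allocation that is DMA-mapped
 * once and registered with the hypervisor via h_reg_sub_crq(); any
 * failure unwinds the completed steps in reverse order through the
 * labels at the bottom of the function.
 */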
2918 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2919                                                         *adapter)
2920 {
2921         struct device *dev = &adapter->vdev->dev;
2922         struct ibmvnic_sub_crq_queue *scrq;
2923         int rc;
2924
2925         scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2926         if (!scrq)
2927                 return NULL;
2928
2929         scrq->msgs =
2930                 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2931         if (!scrq->msgs) {
2932                 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2933                 goto zero_page_failed;
2934         }
2935
2936         scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2937                                          DMA_BIDIRECTIONAL);
2938         if (dma_mapping_error(dev, scrq->msg_token)) {
2939                 dev_warn(dev, "Couldn't map crq queue messages page\n");
2940                 goto map_failed;
2941         }
2942
2943         rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2944                            4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2945
2946         if (rc == H_RESOURCE)
2947                 rc = ibmvnic_reset_crq(adapter);
2948
2949         if (rc == H_CLOSED) {
2950                 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2951         } else if (rc) {
2952                 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2953                 goto reg_failed;
2954         }
2955
2956         scrq->adapter = adapter;
2957         scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2958         spin_lock_init(&scrq->lock);
2959
2960         netdev_dbg(adapter->netdev,
2961                    "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2962                    scrq->crq_num, scrq->hw_irq, scrq->irq);
2963
2964         return scrq;
2965
2966 reg_failed:
2967         dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2968                          DMA_BIDIRECTIONAL);
2969 map_failed:
2970         free_pages((unsigned long)scrq->msgs, 2);
2971 zero_page_failed:
2972         kfree(scrq);
2973
2974         return NULL;
2975 }
2976
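/*
 * Descriptive note (added annotation): tear down every tx and rx sub-CRQ,
 * including any IRQs still bound to them. @do_h_free selects whether
 * H_FREE_SUB_CRQ is also issued; a caller that knows the hypervisor-side
 * queues are already gone can pass false and skip the hcalls.
 */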
2977 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2978 {
2979         int i;
2980
2981         if (adapter->tx_scrq) {
2982                 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2983                         if (!adapter->tx_scrq[i])
2984                                 continue;
2985
2986                         netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2987                                    i);
2988                         if (adapter->tx_scrq[i]->irq) {
2989                                 free_irq(adapter->tx_scrq[i]->irq,
2990                                          adapter->tx_scrq[i]);
2991                                 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2992                                 adapter->tx_scrq[i]->irq = 0;
2993                         }
2994
2995                         release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2996                                               do_h_free);
2997                 }
2998
2999                 kfree(adapter->tx_scrq);
3000                 adapter->tx_scrq = NULL;
3001                 adapter->num_active_tx_scrqs = 0;
3002         }
3003
3004         if (adapter->rx_scrq) {
3005                 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3006                         if (!adapter->rx_scrq[i])
3007                                 continue;
3008
3009                         netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3010                                    i);
3011                         if (adapter->rx_scrq[i]->irq) {
3012                                 free_irq(adapter->rx_scrq[i]->irq,
3013                                          adapter->rx_scrq[i]);
3014                                 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3015                                 adapter->rx_scrq[i]->irq = 0;
3016                         }
3017
3018                         release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3019                                               do_h_free);
3020                 }
3021
3022                 kfree(adapter->rx_scrq);
3023                 adapter->rx_scrq = NULL;
3024                 adapter->num_active_rx_scrqs = 0;
3025         }
3026 }
3027
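/*
 * Descriptive note (added annotation): sub-CRQ interrupts are masked and
 * unmasked through the hypervisor rather than the local irqchip; H_VIOCTL
 * with H_DISABLE_VIO_INTERRUPT / H_ENABLE_VIO_INTERRUPT acts on the
 * firmware-assigned hw_irq number.
 */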
3028 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3029                             struct ibmvnic_sub_crq_queue *scrq)
3030 {
3031         struct device *dev = &adapter->vdev->dev;
3032         unsigned long rc;
3033
3034         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3035                                 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3036         if (rc)
3037                 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3038                         scrq->hw_irq, rc);
3039         return rc;
3040 }
3041
3042 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3043                            struct ibmvnic_sub_crq_queue *scrq)
3044 {
3045         struct device *dev = &adapter->vdev->dev;
3046         unsigned long rc;
3047
3048         if (scrq->hw_irq > 0x100000000ULL) {
3049                 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3050                 return 1;
3051         }
3052
3053         if (test_bit(0, &adapter->resetting) &&
3054             adapter->reset_reason == VNIC_RESET_MOBILITY) {
3055                 u64 val = (0xff000000) | scrq->hw_irq;
3056
3057                 rc = plpar_hcall_norets(H_EOI, val);
3058                 /* H_EOI would fail with rc = H_FUNCTION when running
3059                  * in XIVE mode which is expected, but not an error.
3060                  */
3061                 if (rc && (rc != H_FUNCTION))
3062                         dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3063                                 val, rc);
3064         }
3065
3066         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3067                                 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3068         if (rc)
3069                 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3070                         scrq->hw_irq, rc);
3071         return rc;
3072 }
3073
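/*
 * Descriptive note (added annotation): drain tx completions from one
 * sub-CRQ. Each completion carries up to num_comps correlators; a
 * correlator is the buffer index within the tx pool, with
 * IBMVNIC_TSO_POOL_MASK set when the buffer came from the TSO pool. Once
 * drained, the IRQ is re-enabled and the queue is checked once more to
 * close the race with completions that arrived in between.
 */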
3074 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3075                                struct ibmvnic_sub_crq_queue *scrq)
3076 {
3077         struct device *dev = &adapter->vdev->dev;
3078         struct ibmvnic_tx_pool *tx_pool;
3079         struct ibmvnic_tx_buff *txbuff;
3080         union sub_crq *next;
3081         int index;
3082         int i, j;
3083
3084 restart_loop:
3085         while (pending_scrq(adapter, scrq)) {
3086                 unsigned int pool = scrq->pool_index;
3087                 int num_entries = 0;
3088
3089                 next = ibmvnic_next_scrq(adapter, scrq);
3090                 for (i = 0; i < next->tx_comp.num_comps; i++) {
3091                         if (next->tx_comp.rcs[i]) {
3092                                 dev_err(dev, "tx error %x\n",
3093                                         next->tx_comp.rcs[i]);
3094                                 continue;
3095                         }
3096                         index = be32_to_cpu(next->tx_comp.correlators[i]);
3097                         if (index & IBMVNIC_TSO_POOL_MASK) {
3098                                 tx_pool = &adapter->tso_pool[pool];
3099                                 index &= ~IBMVNIC_TSO_POOL_MASK;
3100                         } else {
3101                                 tx_pool = &adapter->tx_pool[pool];
3102                         }
3103
3104                         txbuff = &tx_pool->tx_buff[index];
3105
3106                         for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
3107                                 if (!txbuff->data_dma[j])
3108                                         continue;
3109
3110                                 txbuff->data_dma[j] = 0;
3111                         }
3112
3113                         if (txbuff->last_frag) {
3114                                 dev_kfree_skb_any(txbuff->skb);
3115                                 txbuff->skb = NULL;
3116                         }
3117
3118                         num_entries += txbuff->num_entries;
3119
3120                         tx_pool->free_map[tx_pool->producer_index] = index;
3121                         tx_pool->producer_index =
3122                                 (tx_pool->producer_index + 1) %
3123                                         tx_pool->num_buffers;
3124                 }
3125                 /* Clear the entry so pending_scrq() won't see it again */
3126                 next->tx_comp.first = 0;
3127
3128                 if (atomic_sub_return(num_entries, &scrq->used) <=
3129                     (adapter->req_tx_entries_per_subcrq / 2) &&
3130                     __netif_subqueue_stopped(adapter->netdev,
3131                                              scrq->pool_index)) {
3132                         netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3133                         netdev_dbg(adapter->netdev, "Started queue %d\n",
3134                                    scrq->pool_index);
3135                 }
3136         }
3137
3138         enable_scrq_irq(adapter, scrq);
3139
3140         if (pending_scrq(adapter, scrq)) {
3141                 disable_scrq_irq(adapter, scrq);
3142                 goto restart_loop;
3143         }
3144
3145         return 0;
3146 }
3147
3148 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3149 {
3150         struct ibmvnic_sub_crq_queue *scrq = instance;
3151         struct ibmvnic_adapter *adapter = scrq->adapter;
3152
3153         disable_scrq_irq(adapter, scrq);
3154         ibmvnic_complete_tx(adapter, scrq);
3155
3156         return IRQ_HANDLED;
3157 }
3158
3159 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3160 {
3161         struct ibmvnic_sub_crq_queue *scrq = instance;
3162         struct ibmvnic_adapter *adapter = scrq->adapter;
3163
3164         /* When booting a kdump kernel we can hit pending interrupts
3165          * prior to completing driver initialization.
3166          */
3167         if (unlikely(adapter->state != VNIC_OPEN))
3168                 return IRQ_NONE;
3169
3170         adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3171
3172         if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3173                 disable_scrq_irq(adapter, scrq);
3174                 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3175         }
3176
3177         return IRQ_HANDLED;
3178 }
3179
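/*
 * Descriptive note (added annotation): create an IRQ mapping and register
 * a handler for every tx and rx sub-CRQ. Handlers are named
 * "ibmvnic-<unit>-tx<n>" / "ibmvnic-<unit>-rx<n>" so the queues can be
 * told apart in /proc/interrupts. On error, everything set up so far is
 * unwound and the sub-CRQs themselves are released.
 */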
3180 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3181 {
3182         struct device *dev = &adapter->vdev->dev;
3183         struct ibmvnic_sub_crq_queue *scrq;
3184         int i = 0, j = 0;
3185         int rc = 0;
3186
3187         for (i = 0; i < adapter->req_tx_queues; i++) {
3188                 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3189                            i);
3190                 scrq = adapter->tx_scrq[i];
3191                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3192
3193                 if (!scrq->irq) {
3194                         rc = -EINVAL;
3195                         dev_err(dev, "Error mapping irq\n");
3196                         goto req_tx_irq_failed;
3197                 }
3198
3199                 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3200                          adapter->vdev->unit_address, i);
3201                 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3202                                  0, scrq->name, scrq);
3203
3204                 if (rc) {
3205                         dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3206                                 scrq->irq, rc);
3207                         irq_dispose_mapping(scrq->irq);
3208                         goto req_tx_irq_failed;
3209                 }
3210         }
3211
3212         for (i = 0; i < adapter->req_rx_queues; i++) {
3213                 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3214                            i);
3215                 scrq = adapter->rx_scrq[i];
3216                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3217                 if (!scrq->irq) {
3218                         rc = -EINVAL;
3219                         dev_err(dev, "Error mapping irq\n");
3220                         goto req_rx_irq_failed;
3221                 }
3222                 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3223                          adapter->vdev->unit_address, i);
3224                 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3225                                  0, scrq->name, scrq);
3226                 if (rc) {
3227                         dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3228                                 scrq->irq, rc);
3229                         irq_dispose_mapping(scrq->irq);
3230                         goto req_rx_irq_failed;
3231                 }
3232         }
3233         return rc;
3234
3235 req_rx_irq_failed:
3236         for (j = 0; j < i; j++) {
3237                 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3238                 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3239         }
3240         i = adapter->req_tx_queues;
3241 req_tx_irq_failed:
3242         for (j = 0; j < i; j++) {
3243                 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3244                 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3245         }
3246         release_sub_crqs(adapter, true);
3247         return rc;
3248 }
3249
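/*
 * Descriptive note (added annotation): allocate the requested number of
 * tx and rx sub-CRQs in a single pass. If fewer queues register than
 * requested (but at least the negotiated minimums), the shortfall is
 * spread across req_rx_queues and req_tx_queues before the per-direction
 * arrays are filled in.
 */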
3250 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3251 {
3252         struct device *dev = &adapter->vdev->dev;
3253         struct ibmvnic_sub_crq_queue **allqueues;
3254         int registered_queues = 0;
3255         int total_queues;
3256         int more = 0;
3257         int i;
3258
3259         total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3260
3261         allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3262         if (!allqueues)
3263                 return -1;
3264
3265         for (i = 0; i < total_queues; i++) {
3266                 allqueues[i] = init_sub_crq_queue(adapter);
3267                 if (!allqueues[i]) {
3268                         dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3269                         break;
3270                 }
3271                 registered_queues++;
3272         }
3273
3274         /* Make sure we were able to register the minimum number of queues */
3275         if (registered_queues <
3276             adapter->min_tx_queues + adapter->min_rx_queues) {
3277                 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3278                 goto tx_failed;
3279         }
3280
3281         /* Distribute the shortfall from queues that failed to allocate */
3282         for (i = 0; i < total_queues - registered_queues + more; i++) {
3283                 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3284                 switch (i % 3) {
3285                 case 0:
3286                         if (adapter->req_rx_queues > adapter->min_rx_queues)
3287                                 adapter->req_rx_queues--;
3288                         else
3289                                 more++;
3290                         break;
3291                 case 1:
3292                         if (adapter->req_tx_queues > adapter->min_tx_queues)
3293                                 adapter->req_tx_queues--;
3294                         else
3295                                 more++;
3296                         break;
3297                 }
3298         }
3299
3300         adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3301                                    sizeof(*adapter->tx_scrq), GFP_KERNEL);
3302         if (!adapter->tx_scrq)
3303                 goto tx_failed;
3304
3305         for (i = 0; i < adapter->req_tx_queues; i++) {
3306                 adapter->tx_scrq[i] = allqueues[i];
3307                 adapter->tx_scrq[i]->pool_index = i;
3308                 adapter->num_active_tx_scrqs++;
3309         }
3310
3311         adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3312                                    sizeof(*adapter->rx_scrq), GFP_KERNEL);
3313         if (!adapter->rx_scrq)
3314                 goto rx_failed;
3315
3316         for (i = 0; i < adapter->req_rx_queues; i++) {
3317                 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3318                 adapter->rx_scrq[i]->scrq_num = i;
3319                 adapter->num_active_rx_scrqs++;
3320         }
3321
3322         kfree(allqueues);
3323         return 0;
3324
3325 rx_failed:
3326         kfree(adapter->tx_scrq);
3327         adapter->tx_scrq = NULL;
3328 tx_failed:
3329         for (i = 0; i < registered_queues; i++)
3330                 release_sub_crq_queue(adapter, allqueues[i], true);
3331         kfree(allqueues);
3332         return -1;
3333 }
3334
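/*
 * Descriptive note (added annotation): issue REQUEST_CAPABILITY CRQs for
 * every negotiated resource. On the first (non-retry) pass the requested
 * values are derived from the device's advertised limits and any
 * user-desired overrides, clamped so that a full ring of MTU-sized
 * buffers still fits in one long-term buffer (IBMVNIC_MAX_LTB_SIZE).
 * running_cap_crqs counts the outstanding requests so the response path
 * can tell when the exchange is complete.
 */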
3335 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3336 {
3337         struct device *dev = &adapter->vdev->dev;
3338         union ibmvnic_crq crq;
3339         int max_entries;
3340
3341         if (!retry) {
3342                 /* Sub-CRQ entries are 32 bytes long */
3343                 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3344
3345                 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3346                     adapter->min_rx_add_entries_per_subcrq > entries_page) {
3347                         dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3348                         return;
3349                 }
3350
3351                 if (adapter->desired.mtu)
3352                         adapter->req_mtu = adapter->desired.mtu;
3353                 else
3354                         adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3355
3356                 if (!adapter->desired.tx_entries)
3357                         adapter->desired.tx_entries =
3358                                         adapter->max_tx_entries_per_subcrq;
3359                 if (!adapter->desired.rx_entries)
3360                         adapter->desired.rx_entries =
3361                                         adapter->max_rx_add_entries_per_subcrq;
3362
3363                 max_entries = IBMVNIC_MAX_LTB_SIZE /
3364                               (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3365
3366                 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3367                         adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3368                         adapter->desired.tx_entries = max_entries;
3369                 }
3370
3371                 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3372                         adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3373                         adapter->desired.rx_entries = max_entries;
3374                 }
3375
3376                 if (adapter->desired.tx_entries)
3377                         adapter->req_tx_entries_per_subcrq =
3378                                         adapter->desired.tx_entries;
3379                 else
3380                         adapter->req_tx_entries_per_subcrq =
3381                                         adapter->max_tx_entries_per_subcrq;
3382
3383                 if (adapter->desired.rx_entries)
3384                         adapter->req_rx_add_entries_per_subcrq =
3385                                         adapter->desired.rx_entries;
3386                 else
3387                         adapter->req_rx_add_entries_per_subcrq =
3388                                         adapter->max_rx_add_entries_per_subcrq;
3389
3390                 if (adapter->desired.tx_queues)
3391                         adapter->req_tx_queues =
3392                                         adapter->desired.tx_queues;
3393                 else
3394                         adapter->req_tx_queues =
3395                                         adapter->opt_tx_comp_sub_queues;
3396
3397                 if (adapter->desired.rx_queues)
3398                         adapter->req_rx_queues =
3399                                         adapter->desired.rx_queues;
3400                 else
3401                         adapter->req_rx_queues =
3402                                         adapter->opt_rx_comp_queues;
3403
3404                 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3405         }
3406
3407         memset(&crq, 0, sizeof(crq));
3408         crq.request_capability.first = IBMVNIC_CRQ_CMD;
3409         crq.request_capability.cmd = REQUEST_CAPABILITY;
3410
3411         crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3412         crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3413         atomic_inc(&adapter->running_cap_crqs);
3414         ibmvnic_send_crq(adapter, &crq);
3415
3416         crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3417         crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3418         atomic_inc(&adapter->running_cap_crqs);
3419         ibmvnic_send_crq(adapter, &crq);
3420
3421         crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3422         crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3423         atomic_inc(&adapter->running_cap_crqs);
3424         ibmvnic_send_crq(adapter, &crq);
3425
3426         crq.request_capability.capability =
3427             cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3428         crq.request_capability.number =
3429             cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3430         atomic_inc(&adapter->running_cap_crqs);
3431         ibmvnic_send_crq(adapter, &crq);
3432
3433         crq.request_capability.capability =
3434             cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3435         crq.request_capability.number =
3436             cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3437         atomic_inc(&adapter->running_cap_crqs);
3438         ibmvnic_send_crq(adapter, &crq);
3439
3440         crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3441         crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3442         atomic_inc(&adapter->running_cap_crqs);
3443         ibmvnic_send_crq(adapter, &crq);
3444
3445         if (adapter->netdev->flags & IFF_PROMISC) {
3446                 if (adapter->promisc_supported) {
3447                         crq.request_capability.capability =
3448                             cpu_to_be16(PROMISC_REQUESTED);
3449                         crq.request_capability.number = cpu_to_be64(1);
3450                         atomic_inc(&adapter->running_cap_crqs);
3451                         ibmvnic_send_crq(adapter, &crq);
3452                 }
3453         } else {
3454                 crq.request_capability.capability =
3455                     cpu_to_be16(PROMISC_REQUESTED);
3456                 crq.request_capability.number = cpu_to_be64(0);
3457                 atomic_inc(&adapter->running_cap_crqs);
3458                 ibmvnic_send_crq(adapter, &crq);
3459         }
3460 }
3461
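/*
 * Descriptive note (added annotation): an entry in the message ring
 * belongs to the driver once the server has set IBMVNIC_CRQ_CMD_RSP in
 * its first byte. pending_scrq() only peeks at the current slot;
 * ibmvnic_next_scrq() consumes it and advances the cursor under
 * scrq->lock.
 */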
3462 static int pending_scrq(struct ibmvnic_adapter *adapter,
3463                         struct ibmvnic_sub_crq_queue *scrq)
3464 {
3465         union sub_crq *entry = &scrq->msgs[scrq->cur];
3466
3467         if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3468                 return 1;
3469         else
3470                 return 0;
3471 }
3472
3473 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3474                                         struct ibmvnic_sub_crq_queue *scrq)
3475 {
3476         union sub_crq *entry;
3477         unsigned long flags;
3478
3479         spin_lock_irqsave(&scrq->lock, flags);
3480         entry = &scrq->msgs[scrq->cur];
3481         if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3482                 if (++scrq->cur == scrq->size)
3483                         scrq->cur = 0;
3484         } else {
3485                 entry = NULL;
3486         }
3487         spin_unlock_irqrestore(&scrq->lock, flags);
3488
3489         return entry;
3490 }
3491
3492 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3493 {
3494         struct ibmvnic_crq_queue *queue = &adapter->crq;
3495         union ibmvnic_crq *crq;
3496
3497         crq = &queue->msgs[queue->cur];
3498         if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3499                 if (++queue->cur == queue->size)
3500                         queue->cur = 0;
3501         } else {
3502                 crq = NULL;
3503         }
3504
3505         return crq;
3506 }
3507
3508 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3509 {
3510         switch (rc) {
3511         case H_PARAMETER:
3512                 dev_warn_ratelimited(dev,
3513                                      "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3514                                      func, rc);
3515                 break;
3516         case H_CLOSED:
3517                 dev_warn_ratelimited(dev,
3518                                      "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3519                                      func, rc);
3520                 break;
3521         default:
3522                 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3523                 break;
3524         }
3525 }
3526
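/*
 * Descriptive note (added annotation): hand one 32-byte descriptor to the
 * hypervisor as hcall arguments. The mb() orders earlier stores (such as
 * writes into the long-term mapped buffers the descriptor points at)
 * ahead of H_SEND_SUB_CRQ, so the server never reads stale data.
 */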
3527 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3528                        union sub_crq *sub_crq)
3529 {
3530         unsigned int ua = adapter->vdev->unit_address;
3531         struct device *dev = &adapter->vdev->dev;
3532         u64 *u64_crq = (u64 *)sub_crq;
3533         int rc;
3534
3535         netdev_dbg(adapter->netdev,
3536                    "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3537                    (unsigned long int)cpu_to_be64(remote_handle),
3538                    (unsigned long int)cpu_to_be64(u64_crq[0]),
3539                    (unsigned long int)cpu_to_be64(u64_crq[1]),
3540                    (unsigned long int)cpu_to_be64(u64_crq[2]),
3541                    (unsigned long int)cpu_to_be64(u64_crq[3]));
3542
3543         /* Make sure the hypervisor sees the complete request */
3544         mb();
3545
3546         rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3547                                 cpu_to_be64(remote_handle),
3548                                 cpu_to_be64(u64_crq[0]),
3549                                 cpu_to_be64(u64_crq[1]),
3550                                 cpu_to_be64(u64_crq[2]),
3551                                 cpu_to_be64(u64_crq[3]));
3552
3553         if (rc)
3554                 print_subcrq_error(dev, rc, __func__);
3555
3556         return rc;
3557 }
3558
3559 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3560                                 u64 remote_handle, u64 ioba, u64 num_entries)
3561 {
3562         unsigned int ua = adapter->vdev->unit_address;
3563         struct device *dev = &adapter->vdev->dev;
3564         int rc;
3565
3566         /* Make sure the hypervisor sees the complete request */
3567         mb();
3568         rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3569                                 cpu_to_be64(remote_handle),
3570                                 ioba, num_entries);
3571
3572         if (rc)
3573                 print_subcrq_error(dev, rc, __func__);
3574
3575         return rc;
3576 }
3577
3578 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3579                             union ibmvnic_crq *crq)
3580 {
3581         unsigned int ua = adapter->vdev->unit_address;
3582         struct device *dev = &adapter->vdev->dev;
3583         u64 *u64_crq = (u64 *)crq;
3584         int rc;
3585
3586         netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3587                    (unsigned long int)cpu_to_be64(u64_crq[0]),
3588                    (unsigned long int)cpu_to_be64(u64_crq[1]));
3589
3590         if (!adapter->crq.active &&
3591             crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3592                 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3593                 return -EINVAL;
3594         }
3595
3596         /* Make sure the hypervisor sees the complete request */
3597         mb();
3598
3599         rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3600                                 cpu_to_be64(u64_crq[0]),
3601                                 cpu_to_be64(u64_crq[1]));
3602
3603         if (rc) {
3604                 if (rc == H_CLOSED) {
3605                         dev_warn(dev, "CRQ Queue closed\n");
3606                         /* do not reset, report the failure, wait for passive init from server */
3607                 }
3608
3609                 dev_warn(dev, "Send error (rc=%d)\n", rc);
3610         }
3611
3612         return rc;
3613 }
3614
3615 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3616 {
3617         struct device *dev = &adapter->vdev->dev;
3618         union ibmvnic_crq crq;
3619         int retries = 100;
3620         int rc;
3621
3622         memset(&crq, 0, sizeof(crq));
3623         crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3624         crq.generic.cmd = IBMVNIC_CRQ_INIT;
3625         netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3626
3627         do {
3628                 rc = ibmvnic_send_crq(adapter, &crq);
3629                 if (rc != H_CLOSED)
3630                         break;
3631                 retries--;
3632                 msleep(50);
3633
3634         } while (retries > 0);
3635
3636         if (rc) {
3637                 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3638                 return rc;
3639         }
3640
3641         return 0;
3642 }
3643
3644 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3645 {
3646         union ibmvnic_crq crq;
3647
3648         memset(&crq, 0, sizeof(crq));
3649         crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3650         crq.version_exchange.cmd = VERSION_EXCHANGE;
3651         crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3652
3653         return ibmvnic_send_crq(adapter, &crq);
3654 }
3655
3656 struct vnic_login_client_data {
3657         u8      type;
3658         __be16  len;
3659         char    name[];
3660 } __packed;
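/*
 * Descriptive note (added annotation): client data is appended to the
 * login buffer as a packed list of these TLV entries: type 1 = OS name,
 * type 2 = LPAR name, type 3 = device name, followed by a zeroed
 * terminating entry that is accounted for in vnic_client_data_len()
 * below.
 */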
3661
3662 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3663 {
3664         int len;
3665
3666         /* Calculate the amount of buffer space needed for the
3667          * vnic client data in the login buffer. There are four entries,
3668          * OS name, LPAR name, device name, and a null last entry.
3669          */
3670         len = 4 * sizeof(struct vnic_login_client_data);
3671         len += 6; /* strlen("Linux") plus NUL terminator */
3672         len += strlen(utsname()->nodename) + 1;
3673         len += strlen(adapter->netdev->name) + 1;
3674
3675         return len;
3676 }
3677
3678 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3679                                  struct vnic_login_client_data *vlcd)
3680 {
3681         const char *os_name = "Linux";
3682         int len;
3683
3684         /* Type 1 - LPAR OS */
3685         vlcd->type = 1;
3686         len = strlen(os_name) + 1;
3687         vlcd->len = cpu_to_be16(len);
3688         strncpy(vlcd->name, os_name, len);
3689         vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3690
3691         /* Type 2 - LPAR name */
3692         vlcd->type = 2;
3693         len = strlen(utsname()->nodename) + 1;
3694         vlcd->len = cpu_to_be16(len);
3695         strncpy(vlcd->name, utsname()->nodename, len);
3696         vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3697
3698         /* Type 3 - device name */
3699         vlcd->type = 3;
3700         len = strlen(adapter->netdev->name) + 1;
3701         vlcd->len = cpu_to_be16(len);
3702         strncpy(vlcd->name, adapter->netdev->name, len);
3703 }
3704
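/*
 * Descriptive note (added annotation): build and send the LOGIN request.
 * The login buffer carries the tx/rx sub-CRQ handles plus the client
 * data TLVs; the response buffer is DMA-mapped here as well and its
 * address placed inside the request, so both mappings must stay live
 * until the LOGIN_RSP is processed.
 */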
3705 static int send_login(struct ibmvnic_adapter *adapter)
3706 {
3707         struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3708         struct ibmvnic_login_buffer *login_buffer;
3709         struct device *dev = &adapter->vdev->dev;
3710         dma_addr_t rsp_buffer_token;
3711         dma_addr_t buffer_token;
3712         size_t rsp_buffer_size;
3713         union ibmvnic_crq crq;
3714         size_t buffer_size;
3715         __be64 *tx_list_p;
3716         __be64 *rx_list_p;
3717         int client_data_len;
3718         struct vnic_login_client_data *vlcd;
3719         int i;
3720
3721         if (!adapter->tx_scrq || !adapter->rx_scrq) {
3722                 netdev_err(adapter->netdev,
3723                            "RX or TX queues are not allocated, device login failed\n");
3724                 return -1;
3725         }
3726
3727         release_login_rsp_buffer(adapter);
3728         client_data_len = vnic_client_data_len(adapter);
3729
3730         buffer_size =
3731             sizeof(struct ibmvnic_login_buffer) +
3732             sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3733             client_data_len;
3734
3735         login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3736         if (!login_buffer)
3737                 goto buf_alloc_failed;
3738
3739         buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3740                                       DMA_TO_DEVICE);
3741         if (dma_mapping_error(dev, buffer_token)) {
3742                 dev_err(dev, "Couldn't map login buffer\n");
3743                 goto buf_map_failed;
3744         }
3745
3746         rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3747                           sizeof(u64) * adapter->req_tx_queues +
3748                           sizeof(u64) * adapter->req_rx_queues +
3749                           sizeof(u64) * adapter->req_rx_queues +
3750                           sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3751
3752         login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3753         if (!login_rsp_buffer)
3754                 goto buf_rsp_alloc_failed;
3755
3756         rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3757                                           rsp_buffer_size, DMA_FROM_DEVICE);
3758         if (dma_mapping_error(dev, rsp_buffer_token)) {
3759                 dev_err(dev, "Couldn't map login rsp buffer\n");
3760                 goto buf_rsp_map_failed;
3761         }
3762
3763         adapter->login_buf = login_buffer;
3764         adapter->login_buf_token = buffer_token;
3765         adapter->login_buf_sz = buffer_size;
3766         adapter->login_rsp_buf = login_rsp_buffer;
3767         adapter->login_rsp_buf_token = rsp_buffer_token;
3768         adapter->login_rsp_buf_sz = rsp_buffer_size;
3769
3770         login_buffer->len = cpu_to_be32(buffer_size);
3771         login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3772         login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3773         login_buffer->off_txcomp_subcrqs =
3774             cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3775         login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3776         login_buffer->off_rxcomp_subcrqs =
3777             cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3778                         sizeof(u64) * adapter->req_tx_queues);
3779         login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3780         login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3781
3782         tx_list_p = (__be64 *)((char *)login_buffer +
3783                                       sizeof(struct ibmvnic_login_buffer));
3784         rx_list_p = (__be64 *)((char *)login_buffer +
3785                                       sizeof(struct ibmvnic_login_buffer) +
3786                                       sizeof(u64) * adapter->req_tx_queues);
3787
3788         for (i = 0; i < adapter->req_tx_queues; i++) {
3789                 if (adapter->tx_scrq[i]) {
3790                         tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3791                                                    crq_num);
3792                 }
3793         }
3794
3795         for (i = 0; i < adapter->req_rx_queues; i++) {
3796                 if (adapter->rx_scrq[i]) {
3797                         rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3798                                                    crq_num);
3799                 }
3800         }
3801
3802         /* Insert vNIC login client data */
3803         vlcd = (struct vnic_login_client_data *)
3804                 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3805         login_buffer->client_data_offset =
3806                         cpu_to_be32((char *)vlcd - (char *)login_buffer);
3807         login_buffer->client_data_len = cpu_to_be32(client_data_len);
3808
3809         vnic_add_client_data(adapter, vlcd);
3810
3811         netdev_dbg(adapter->netdev, "Login Buffer:\n");
3812         for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3813                 netdev_dbg(adapter->netdev, "%016lx\n",
3814                            ((unsigned long int *)(adapter->login_buf))[i]);
3815         }
3816
3817         memset(&crq, 0, sizeof(crq));
3818         crq.login.first = IBMVNIC_CRQ_CMD;
3819         crq.login.cmd = LOGIN;
3820         crq.login.ioba = cpu_to_be32(buffer_token);
3821         crq.login.len = cpu_to_be32(buffer_size);
3822         ibmvnic_send_crq(adapter, &crq);
3823
3824         return 0;
3825
3826 buf_rsp_map_failed:
3827         kfree(login_rsp_buffer);
3828 buf_rsp_alloc_failed:
3829         dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3830 buf_map_failed:
3831         kfree(login_buffer);
3832 buf_alloc_failed:
3833         return -1;
3834 }
3835
3836 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3837                             u32 len, u8 map_id)
3838 {
3839         union ibmvnic_crq crq;
3840
3841         memset(&crq, 0, sizeof(crq));
3842         crq.request_map.first = IBMVNIC_CRQ_CMD;
3843         crq.request_map.cmd = REQUEST_MAP;
3844         crq.request_map.map_id = map_id;
3845         crq.request_map.ioba = cpu_to_be32(addr);
3846         crq.request_map.len = cpu_to_be32(len);
3847         return ibmvnic_send_crq(adapter, &crq);
3848 }
3849
3850 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3851 {
3852         union ibmvnic_crq crq;
3853
3854         memset(&crq, 0, sizeof(crq));
3855         crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3856         crq.request_unmap.cmd = REQUEST_UNMAP;
3857         crq.request_unmap.map_id = map_id;
3858         return ibmvnic_send_crq(adapter, &crq);
3859 }
3860
3861 static void send_query_map(struct ibmvnic_adapter *adapter)
3862 {
3863         union ibmvnic_crq crq;
3864
3865         memset(&crq, 0, sizeof(crq));
3866         crq.query_map.first = IBMVNIC_CRQ_CMD;
3867         crq.query_map.cmd = QUERY_MAP;
3868         ibmvnic_send_crq(adapter, &crq);
3869 }
3870
3871 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3872 static void send_query_cap(struct ibmvnic_adapter *adapter)
3873 {
3874         union ibmvnic_crq crq;
3875
3876         atomic_set(&adapter->running_cap_crqs, 0);
3877         memset(&crq, 0, sizeof(crq));
3878         crq.query_capability.first = IBMVNIC_CRQ_CMD;
3879         crq.query_capability.cmd = QUERY_CAPABILITY;
3880
3881         crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3882         atomic_inc(&adapter->running_cap_crqs);
3883         ibmvnic_send_crq(adapter, &crq);
3884
3885         crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3886         atomic_inc(&adapter->running_cap_crqs);
3887         ibmvnic_send_crq(adapter, &crq);
3888
3889         crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3890         atomic_inc(&adapter->running_cap_crqs);
3891         ibmvnic_send_crq(adapter, &crq);
3892
3893         crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3894         atomic_inc(&adapter->running_cap_crqs);
3895         ibmvnic_send_crq(adapter, &crq);
3896
3897         crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3898         atomic_inc(&adapter->running_cap_crqs);
3899         ibmvnic_send_crq(adapter, &crq);
3900
3901         crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3902         atomic_inc(&adapter->running_cap_crqs);
3903         ibmvnic_send_crq(adapter, &crq);
3904
3905         crq.query_capability.capability =
3906             cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3907         atomic_inc(&adapter->running_cap_crqs);
3908         ibmvnic_send_crq(adapter, &crq);
3909
3910         crq.query_capability.capability =
3911             cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3912         atomic_inc(&adapter->running_cap_crqs);
3913         ibmvnic_send_crq(adapter, &crq);
3914
3915         crq.query_capability.capability =
3916             cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3917         atomic_inc(&adapter->running_cap_crqs);
3918         ibmvnic_send_crq(adapter, &crq);
3919
3920         crq.query_capability.capability =
3921             cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3922         atomic_inc(&adapter->running_cap_crqs);
3923         ibmvnic_send_crq(adapter, &crq);
3924
3925         crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3926         atomic_inc(&adapter->running_cap_crqs);
3927         ibmvnic_send_crq(adapter, &crq);
3928
3929         crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3930         atomic_inc(&adapter->running_cap_crqs);
3931         ibmvnic_send_crq(adapter, &crq);
3932
3933         crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3934         atomic_inc(&adapter->running_cap_crqs);
3935         ibmvnic_send_crq(adapter, &crq);
3936
3937         crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3938         atomic_inc(&adapter->running_cap_crqs);
3939         ibmvnic_send_crq(adapter, &crq);
3940
3941         crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3942         atomic_inc(&adapter->running_cap_crqs);
3943         ibmvnic_send_crq(adapter, &crq);
3944
3945         crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3946         atomic_inc(&adapter->running_cap_crqs);
3947         ibmvnic_send_crq(adapter, &crq);
3948
3949         crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3950         atomic_inc(&adapter->running_cap_crqs);
3951         ibmvnic_send_crq(adapter, &crq);
3952
3953         crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3954         atomic_inc(&adapter->running_cap_crqs);
3955         ibmvnic_send_crq(adapter, &crq);
3956
3957         crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3958         atomic_inc(&adapter->running_cap_crqs);
3959         ibmvnic_send_crq(adapter, &crq);
3960
3961         crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3962         atomic_inc(&adapter->running_cap_crqs);
3963         ibmvnic_send_crq(adapter, &crq);
3964
3965         crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3966         atomic_inc(&adapter->running_cap_crqs);
3967         ibmvnic_send_crq(adapter, &crq);
3968
3969         crq.query_capability.capability =
3970                         cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3971         atomic_inc(&adapter->running_cap_crqs);
3972         ibmvnic_send_crq(adapter, &crq);
3973
3974         crq.query_capability.capability =
3975                         cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3976         atomic_inc(&adapter->running_cap_crqs);
3977         ibmvnic_send_crq(adapter, &crq);
3978
3979         crq.query_capability.capability =
3980                         cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3981         atomic_inc(&adapter->running_cap_crqs);
3982         ibmvnic_send_crq(adapter, &crq);
3983
3984         crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3985         atomic_inc(&adapter->running_cap_crqs);
3986         ibmvnic_send_crq(adapter, &crq);
3987 }
3988
3989 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
3990 {
3991         int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3992         struct device *dev = &adapter->vdev->dev;
3993         union ibmvnic_crq crq;
3994
3995         adapter->ip_offload_tok =
3996                 dma_map_single(dev,
3997                                &adapter->ip_offload_buf,
3998                                buf_sz,
3999                                DMA_FROM_DEVICE);
4000
4001         if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4002                 if (!firmware_has_feature(FW_FEATURE_CMO))
4003                         dev_err(dev, "Couldn't map offload buffer\n");
4004                 return;
4005         }
4006
4007         memset(&crq, 0, sizeof(crq));
4008         crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4009         crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4010         crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4011         crq.query_ip_offload.ioba =
4012             cpu_to_be32(adapter->ip_offload_tok);
4013
4014         ibmvnic_send_crq(adapter, &crq);
4015 }
4016
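/*
 * Descriptive note (added annotation): turn the queried offload
 * capabilities into a CONTROL_IP_OFFLOAD request and mirror them into
 * netdev->hw_features. Outside of initial probing, features the device
 * no longer offers are switched off and previously enabled ones are
 * restored, keeping the advertised feature set consistent across resets.
 */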
4017 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4018 {
4019         struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4020         struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4021         struct device *dev = &adapter->vdev->dev;
4022         netdev_features_t old_hw_features = 0;
4023         union ibmvnic_crq crq;
4024
4025         adapter->ip_offload_ctrl_tok =
4026                 dma_map_single(dev,
4027                                ctrl_buf,
4028                                sizeof(adapter->ip_offload_ctrl),
4029                                DMA_TO_DEVICE);
4030
4031         if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4032                 dev_err(dev, "Couldn't map ip offload control buffer\n");
4033                 return;
4034         }
4035
4036         ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4037         ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4038         ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4039         ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4040         ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4041         ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4042         ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4043         ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4044         ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4045         ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4046
4047         /* large_rx disabled for now, additional features needed */
4048         ctrl_buf->large_rx_ipv4 = 0;
4049         ctrl_buf->large_rx_ipv6 = 0;
4050
4051         if (adapter->state != VNIC_PROBING) {
4052                 old_hw_features = adapter->netdev->hw_features;
4053                 adapter->netdev->hw_features = 0;
4054         }
4055
4056         adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4057
4058         if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4059                 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4060
4061         if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4062                 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4063
4064         if ((adapter->netdev->features &
4065             (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4066                 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4067
4068         if (buf->large_tx_ipv4)
4069                 adapter->netdev->hw_features |= NETIF_F_TSO;
4070         if (buf->large_tx_ipv6)
4071                 adapter->netdev->hw_features |= NETIF_F_TSO6;
4072
4073         if (adapter->state == VNIC_PROBING) {
4074                 adapter->netdev->features |= adapter->netdev->hw_features;
4075         } else if (old_hw_features != adapter->netdev->hw_features) {
4076                 netdev_features_t tmp = 0;
4077
4078                 /* disable features no longer supported */
4079                 adapter->netdev->features &= adapter->netdev->hw_features;
4080                 /* turn on features now supported if previously enabled */
4081                 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4082                         adapter->netdev->hw_features;
4083                 adapter->netdev->features |=
4084                                 tmp & adapter->netdev->wanted_features;
4085         }
4086
4087         memset(&crq, 0, sizeof(crq));
4088         crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4089         crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4090         crq.control_ip_offload.len =
4091             cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4092         crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4093         ibmvnic_send_crq(adapter, &crq);
4094 }
4095
4096 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4097                                 struct ibmvnic_adapter *adapter)
4098 {
4099         struct device *dev = &adapter->vdev->dev;
4100
4101         if (crq->get_vpd_size_rsp.rc.code) {
4102                 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4103                         crq->get_vpd_size_rsp.rc.code);
4104                 complete(&adapter->fw_done);
4105                 return;
4106         }
4107
4108         adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4109         complete(&adapter->fw_done);
4110 }
4111
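/*
 * Descriptive note (added annotation): parse the VPD response. The
 * firmware level is an ASCII string that follows an "RM" keyword and a
 * one-byte length; every access is bounds-checked against vpd->len since
 * the buffer contents come from the VIOS.
 */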
4112 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4113                            struct ibmvnic_adapter *adapter)
4114 {
4115         struct device *dev = &adapter->vdev->dev;
4116         unsigned char *substr = NULL;
4117         u8 fw_level_len = 0;
4118
4119         memset(adapter->fw_version, 0, 32);
4120
4121         dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4122                          DMA_FROM_DEVICE);
4123
4124         if (crq->get_vpd_rsp.rc.code) {
4125                 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4126                         crq->get_vpd_rsp.rc.code);
4127                 goto complete;
4128         }
4129
4130         /* get the position of the firmware version info
4131          * located after the ASCII 'RM' substring in the buffer
4132          */
4133         substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4134         if (!substr) {
4135                 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4136                 goto complete;
4137         }
4138
4139         /* get length of firmware level ASCII substring */
4140         if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4141                 fw_level_len = *(substr + 2);
4142         } else {
4143                 dev_info(dev, "Length of FW substr extends past end of VPD buffer\n");
4144                 goto complete;
4145         }
4146
4147         /* copy firmware version string from vpd into adapter */
4148         if ((substr + 3 + fw_level_len) <
4149             (adapter->vpd->buff + adapter->vpd->len)) {
4150                 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4151         } else {
4152                 dev_info(dev, "FW substr extends past end of VPD buffer\n");
4153         }
4154
4155 complete:
4156         if (adapter->fw_version[0] == '\0')
4157                 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4158         complete(&adapter->fw_done);
4159 }
4160
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
        int i;

        dma_unmap_single(dev, adapter->ip_offload_tok,
                         sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

        netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
        for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
                netdev_dbg(adapter->netdev, "%016lx\n",
                           ((unsigned long *)(buf))[i]);

        netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
        netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
        netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
                   buf->tcp_ipv4_chksum);
        netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
                   buf->tcp_ipv6_chksum);
        netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
                   buf->udp_ipv4_chksum);
        netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
                   buf->udp_ipv6_chksum);
        netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
                   buf->large_tx_ipv4);
        netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
                   buf->large_tx_ipv6);
        netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
                   buf->large_rx_ipv4);
        netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
                   buf->large_rx_ipv6);
        netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
                   buf->max_ipv4_header_size);
        netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
                   buf->max_ipv6_header_size);
        netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
                   buf->max_tcp_header_size);
        netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
                   buf->max_udp_header_size);
        netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
                   buf->max_large_tx_size);
        netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
                   buf->max_large_rx_size);
        netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
                   buf->ipv6_extension_header);
        netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
                   buf->tcp_pseudosum_req);
        netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
                   buf->num_ipv6_ext_headers);
        netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
                   buf->off_ipv6_ext_headers);

        send_control_ip_offload(adapter);
}

static const char *ibmvnic_fw_err_cause(u16 cause)
{
        switch (cause) {
        case ADAPTER_PROBLEM:
                return "adapter problem";
        case BUS_PROBLEM:
                return "bus problem";
        case FW_PROBLEM:
                return "firmware problem";
        case DD_PROBLEM:
                return "device driver problem";
        case EEH_RECOVERY:
                return "EEH recovery";
        case FW_UPDATED:
                return "firmware updated";
        case LOW_MEMORY:
                return "low memory";
        default:
                return "unknown";
        }
}

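/* An ERROR_INDICATION CRQ reports a firmware-detected error. Log the
 * cause and schedule a fatal or non-fatal reset depending on the
 * IBMVNIC_FATAL_ERROR flag.
 */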
static void handle_error_indication(union ibmvnic_crq *crq,
                                    struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        u16 cause;

        cause = be16_to_cpu(crq->error_indication.error_cause);

        dev_warn_ratelimited(dev,
                             "Firmware reports %serror, cause: %s. Starting recovery...\n",
                             crq->error_indication.flags
                                & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
                             ibmvnic_fw_err_cause(cause));

        if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
                ibmvnic_reset(adapter, VNIC_RESET_FATAL);
        else
                ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static int handle_change_mac_rsp(union ibmvnic_crq *crq,
                                 struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct device *dev = &adapter->vdev->dev;
        long rc;

        rc = crq->change_mac_addr_rsp.rc.code;
        if (rc) {
                dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
                goto out;
        }
        /* crq->change_mac_addr.mac_addr is the requested one
         * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
         */
        ether_addr_copy(netdev->dev_addr,
                        &crq->change_mac_addr_rsp.mac_addr[0]);
        ether_addr_copy(adapter->mac_addr,
                        &crq->change_mac_addr_rsp.mac_addr[0]);
out:
        complete(&adapter->fw_done);
        return rc;
}

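/* Handle a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * could not grant the requested value and returns the closest value it
 * can support, so the request is retried with that value (except for
 * the MTU, which reverts to the fallback value). Once all outstanding
 * capability requests have completed, continue the init sequence by
 * querying IP offload support.
 */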
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                                   struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        u64 *req_value;
        char *name;

        atomic_dec(&adapter->running_cap_crqs);
        switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
        case REQ_TX_QUEUES:
                req_value = &adapter->req_tx_queues;
                name = "tx";
                break;
        case REQ_RX_QUEUES:
                req_value = &adapter->req_rx_queues;
                name = "rx";
                break;
        case REQ_RX_ADD_QUEUES:
                req_value = &adapter->req_rx_add_queues;
                name = "rx_add";
                break;
        case REQ_TX_ENTRIES_PER_SUBCRQ:
                req_value = &adapter->req_tx_entries_per_subcrq;
                name = "tx_entries_per_subcrq";
                break;
        case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
                req_value = &adapter->req_rx_add_entries_per_subcrq;
                name = "rx_add_entries_per_subcrq";
                break;
        case REQ_MTU:
                req_value = &adapter->req_mtu;
                name = "mtu";
                break;
        case PROMISC_REQUESTED:
                req_value = &adapter->promisc;
                name = "promisc";
                break;
        default:
                dev_err(dev, "Got invalid cap request rsp %d\n",
                        be16_to_cpu(crq->request_capability.capability));
                return;
        }

        switch (crq->request_capability_rsp.rc.code) {
        case SUCCESS:
                break;
        case PARTIALSUCCESS:
                dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
                         *req_value,
                         (long)be64_to_cpu(crq->request_capability_rsp.number),
                         name);

                if (be16_to_cpu(crq->request_capability_rsp.capability) ==
                    REQ_MTU) {
                        pr_err("mtu of %llu is not supported. Reverting.\n",
                               *req_value);
                        *req_value = adapter->fallback.mtu;
                } else {
                        *req_value =
                                be64_to_cpu(crq->request_capability_rsp.number);
                }

                send_request_cap(adapter, 1);
                return;
        default:
                dev_err(dev, "Error %d in request cap rsp\n",
                        crq->request_capability_rsp.rc.code);
                return;
        }

        /* Done receiving requested capabilities, query IP offload support */
        if (atomic_read(&adapter->running_cap_crqs) == 0) {
                adapter->wait_capability = false;
                send_query_ip_offload(adapter);
        }
}

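/* Handle the login response. A non-zero return code typically means
 * the server could not allocate the requested number of queues and the
 * login must be retried with fewer queues; that retry is driven by the
 * caller via init_done_rc. On success, pull the negotiated rx buffer
 * size and the tx/rx sub-CRQ handle arrays out of the response buffer.
 */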
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
                            struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct net_device *netdev = adapter->netdev;
        struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
        struct ibmvnic_login_buffer *login = adapter->login_buf;
        u64 *tx_handle_array;
        u64 *rx_handle_array;
        int num_tx_pools;
        int num_rx_pools;
        u64 *size_array;
        int i;

        dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, adapter->login_rsp_buf_token,
                         adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

        /* If the number of queues requested can't be allocated by the
         * server, the login response will return with code 1. We will need
         * to resend the login buffer with fewer queues requested.
         */
        if (login_rsp_crq->generic.rc.code) {
                adapter->init_done_rc = login_rsp_crq->generic.rc.code;
                complete(&adapter->init_done);
                return 0;
        }

        netdev->mtu = adapter->req_mtu - ETH_HLEN;

        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
        for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
                netdev_dbg(adapter->netdev, "%016lx\n",
                           ((unsigned long *)(adapter->login_rsp_buf))[i]);
        }

        /* Sanity checks */
        if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
            (be32_to_cpu(login->num_rxcomp_subcrqs) *
             adapter->req_rx_add_queues !=
             be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
                dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
                ibmvnic_remove(adapter->vdev);
                return -EIO;
        }
        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
        /* variable buffer sizes are not supported, so just read the
         * first entry.
         */
        adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

        num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
        num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

        tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                                  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
        rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                                  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

        for (i = 0; i < num_tx_pools; i++)
                adapter->tx_scrq[i]->handle = tx_handle_array[i];

        for (i = 0; i < num_rx_pools; i++)
                adapter->rx_scrq[i]->handle = rx_handle_array[i];

        adapter->num_active_tx_scrqs = num_tx_pools;
        adapter->num_active_rx_scrqs = num_rx_pools;
        release_login_rsp_buffer(adapter);
        release_login_buffer(adapter);
        complete(&adapter->init_done);

        return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
                                     struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        long rc;

        rc = crq->request_unmap_rsp.rc.code;
        if (rc)
                dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
                                 struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct device *dev = &adapter->vdev->dev;
        long rc;

        rc = crq->query_map_rsp.rc.code;
        if (rc) {
                dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
                return;
        }
        netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
                   crq->query_map_rsp.page_size,
                   be32_to_cpu(crq->query_map_rsp.tot_pages),
                   be32_to_cpu(crq->query_map_rsp.free_pages));
}

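/* Record a single QUERY_CAPABILITY response in the adapter structure.
 * When the last outstanding query completes, move on to requesting the
 * capability values the driver actually wants.
 */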
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
                                 struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct device *dev = &adapter->vdev->dev;
        long rc;

        atomic_dec(&adapter->running_cap_crqs);
        netdev_dbg(netdev, "Outstanding queries: %d\n",
                   atomic_read(&adapter->running_cap_crqs));
        rc = crq->query_capability.rc.code;
        if (rc) {
                dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
                goto out;
        }

        switch (be16_to_cpu(crq->query_capability.capability)) {
        case MIN_TX_QUEUES:
                adapter->min_tx_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_tx_queues = %lld\n",
                           adapter->min_tx_queues);
                break;
        case MIN_RX_QUEUES:
                adapter->min_rx_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_rx_queues = %lld\n",
                           adapter->min_rx_queues);
                break;
        case MIN_RX_ADD_QUEUES:
                adapter->min_rx_add_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
                           adapter->min_rx_add_queues);
                break;
        case MAX_TX_QUEUES:
                adapter->max_tx_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_tx_queues = %lld\n",
                           adapter->max_tx_queues);
                break;
        case MAX_RX_QUEUES:
                adapter->max_rx_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_rx_queues = %lld\n",
                           adapter->max_rx_queues);
                break;
        case MAX_RX_ADD_QUEUES:
                adapter->max_rx_add_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
                           adapter->max_rx_add_queues);
                break;
        case MIN_TX_ENTRIES_PER_SUBCRQ:
                adapter->min_tx_entries_per_subcrq =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
                           adapter->min_tx_entries_per_subcrq);
                break;
        case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
                adapter->min_rx_add_entries_per_subcrq =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
                           adapter->min_rx_add_entries_per_subcrq);
                break;
        case MAX_TX_ENTRIES_PER_SUBCRQ:
                adapter->max_tx_entries_per_subcrq =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
                           adapter->max_tx_entries_per_subcrq);
                break;
        case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
                adapter->max_rx_add_entries_per_subcrq =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
                           adapter->max_rx_add_entries_per_subcrq);
                break;
        case TCP_IP_OFFLOAD:
                adapter->tcp_ip_offload =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
                           adapter->tcp_ip_offload);
                break;
        case PROMISC_SUPPORTED:
                adapter->promisc_supported =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "promisc_supported = %lld\n",
                           adapter->promisc_supported);
                break;
        case MIN_MTU:
                adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
                netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
                netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
                break;
        case MAX_MTU:
                adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
                netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
                netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
                break;
        case MAX_MULTICAST_FILTERS:
                adapter->max_multicast_filters =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_multicast_filters = %lld\n",
                           adapter->max_multicast_filters);
                break;
        case VLAN_HEADER_INSERTION:
                adapter->vlan_header_insertion =
                    be64_to_cpu(crq->query_capability.number);
                if (adapter->vlan_header_insertion)
                        netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
                netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
                           adapter->vlan_header_insertion);
                break;
        case RX_VLAN_HEADER_INSERTION:
                adapter->rx_vlan_header_insertion =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
                           adapter->rx_vlan_header_insertion);
                break;
        case MAX_TX_SG_ENTRIES:
                adapter->max_tx_sg_entries =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
                           adapter->max_tx_sg_entries);
                break;
        case RX_SG_SUPPORTED:
                adapter->rx_sg_supported =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "rx_sg_supported = %lld\n",
                           adapter->rx_sg_supported);
                break;
        case OPT_TX_COMP_SUB_QUEUES:
                adapter->opt_tx_comp_sub_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
                           adapter->opt_tx_comp_sub_queues);
                break;
        case OPT_RX_COMP_QUEUES:
                adapter->opt_rx_comp_queues =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
                           adapter->opt_rx_comp_queues);
                break;
        case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
                adapter->opt_rx_bufadd_q_per_rx_comp_q =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
                           adapter->opt_rx_bufadd_q_per_rx_comp_q);
                break;
        case OPT_TX_ENTRIES_PER_SUBCRQ:
                adapter->opt_tx_entries_per_subcrq =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
                           adapter->opt_tx_entries_per_subcrq);
                break;
        case OPT_RXBA_ENTRIES_PER_SUBCRQ:
                adapter->opt_rxba_entries_per_subcrq =
                    be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
                           adapter->opt_rxba_entries_per_subcrq);
                break;
        case TX_RX_DESC_REQ:
                adapter->tx_rx_desc_req = crq->query_capability.number;
                netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
                           adapter->tx_rx_desc_req);
                break;

        default:
                netdev_err(netdev, "Got invalid cap rsp %d\n",
                           be16_to_cpu(crq->query_capability.capability));
        }

out:
        if (atomic_read(&adapter->running_cap_crqs) == 0) {
                adapter->wait_capability = false;
                send_request_cap(adapter, 0);
        }
}

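/* Issue a QUERY_PHYS_PARMS CRQ and wait (under fw_lock) for the
 * matching response, which is signalled through the fw_done completion
 * by the CRQ handler.
 */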
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;
        int rc;

        memset(&crq, 0, sizeof(crq));
        crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
        crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        mutex_unlock(&adapter->fw_lock);
        return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
                                       struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int rc;
        u32 rspeed = be32_to_cpu(crq->query_phys_parms_rsp.speed);

        rc = crq->query_phys_parms_rsp.rc.code;
        if (rc) {
                netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
                return rc;
        }
        switch (rspeed) {
        case IBMVNIC_10MBPS:
                adapter->speed = SPEED_10;
                break;
        case IBMVNIC_100MBPS:
                adapter->speed = SPEED_100;
                break;
        case IBMVNIC_1GBPS:
                adapter->speed = SPEED_1000;
                break;
        case IBMVNIC_10GBPS:
                adapter->speed = SPEED_10000;
                break;
        case IBMVNIC_25GBPS:
                adapter->speed = SPEED_25000;
                break;
        case IBMVNIC_40GBPS:
                adapter->speed = SPEED_40000;
                break;
        case IBMVNIC_50GBPS:
                adapter->speed = SPEED_50000;
                break;
        case IBMVNIC_100GBPS:
                adapter->speed = SPEED_100000;
                break;
        case IBMVNIC_200GBPS:
                adapter->speed = SPEED_200000;
                break;
        default:
                if (netif_carrier_ok(netdev))
                        netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
                adapter->speed = SPEED_UNKNOWN;
        }
        if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
                adapter->duplex = DUPLEX_FULL;
        else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
                adapter->duplex = DUPLEX_HALF;
        else
                adapter->duplex = DUPLEX_UNKNOWN;

        return rc;
}

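/* Top-level CRQ dispatcher: handles transport events (init, failover,
 * migration) directly and fans command responses out to the handlers
 * above.
 */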
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                               struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_generic_crq *gen_crq = &crq->generic;
        struct net_device *netdev = adapter->netdev;
        struct device *dev = &adapter->vdev->dev;
        u64 *u64_crq = (u64 *)crq;
        long rc;

        netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
                   (unsigned long)cpu_to_be64(u64_crq[0]),
                   (unsigned long)cpu_to_be64(u64_crq[1]));
        switch (gen_crq->first) {
        case IBMVNIC_CRQ_INIT_RSP:
                switch (gen_crq->cmd) {
                case IBMVNIC_CRQ_INIT:
                        dev_info(dev, "Partner initialized\n");
                        adapter->from_passive_init = true;
                        /* set the rc before waking any waiter */
                        if (!completion_done(&adapter->init_done)) {
                                adapter->init_done_rc = -EIO;
                                complete(&adapter->init_done);
                        }
                        ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
                        break;
                case IBMVNIC_CRQ_INIT_COMPLETE:
                        dev_info(dev, "Partner initialization complete\n");
                        adapter->crq.active = true;
                        send_version_xchg(adapter);
                        break;
                default:
                        dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
                }
                return;
        case IBMVNIC_CRQ_XPORT_EVENT:
                netif_carrier_off(netdev);
                adapter->crq.active = false;
                /* terminate any thread waiting for a response
                 * from the device
                 */
                if (!completion_done(&adapter->fw_done)) {
                        adapter->fw_done_rc = -EIO;
                        complete(&adapter->fw_done);
                }
                if (!completion_done(&adapter->stats_done))
                        complete(&adapter->stats_done);
                if (test_bit(0, &adapter->resetting))
                        adapter->force_reset_recovery = true;
                if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
                        dev_info(dev, "Migrated, re-enabling adapter\n");
                        ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
                } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
                        dev_info(dev, "Backing device failover detected\n");
                        adapter->failover_pending = true;
                } else {
                        /* The adapter lost the connection */
                        dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
                                gen_crq->cmd);
                        ibmvnic_reset(adapter, VNIC_RESET_FATAL);
                }
                return;
        case IBMVNIC_CRQ_CMD_RSP:
                break;
        default:
                dev_err(dev, "Got an invalid msg type 0x%02x\n",
                        gen_crq->first);
                return;
        }

        switch (gen_crq->cmd) {
        case VERSION_EXCHANGE_RSP:
                rc = crq->version_exchange_rsp.rc.code;
                if (rc) {
                        dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
                        break;
                }
                ibmvnic_version =
                            be16_to_cpu(crq->version_exchange_rsp.version);
                dev_info(dev, "Partner protocol version is %d\n",
                         ibmvnic_version);
                send_query_cap(adapter);
                break;
        case QUERY_CAPABILITY_RSP:
                handle_query_cap_rsp(crq, adapter);
                break;
        case QUERY_MAP_RSP:
                handle_query_map_rsp(crq, adapter);
                break;
        case REQUEST_MAP_RSP:
                adapter->fw_done_rc = crq->request_map_rsp.rc.code;
                complete(&adapter->fw_done);
                break;
        case REQUEST_UNMAP_RSP:
                handle_request_unmap_rsp(crq, adapter);
                break;
        case REQUEST_CAPABILITY_RSP:
                handle_request_cap_rsp(crq, adapter);
                break;
        case LOGIN_RSP:
                netdev_dbg(netdev, "Got Login Response\n");
                handle_login_rsp(crq, adapter);
                break;
        case LOGICAL_LINK_STATE_RSP:
                netdev_dbg(netdev,
                           "Got Logical Link State Response, state: %d rc: %d\n",
                           crq->logical_link_state_rsp.link_state,
                           crq->logical_link_state_rsp.rc.code);
                adapter->logical_link_state =
                    crq->logical_link_state_rsp.link_state;
                adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
                complete(&adapter->init_done);
                break;
        case LINK_STATE_INDICATION:
                netdev_dbg(netdev, "Got Logical Link State Indication\n");
                adapter->phys_link_state =
                    crq->link_state_indication.phys_link_state;
                adapter->logical_link_state =
                    crq->link_state_indication.logical_link_state;
                if (adapter->phys_link_state && adapter->logical_link_state)
                        netif_carrier_on(netdev);
                else
                        netif_carrier_off(netdev);
                break;
        case CHANGE_MAC_ADDR_RSP:
                netdev_dbg(netdev, "Got MAC address change Response\n");
                adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
                break;
        case ERROR_INDICATION:
                netdev_dbg(netdev, "Got Error Indication\n");
                handle_error_indication(crq, adapter);
                break;
        case REQUEST_STATISTICS_RSP:
                netdev_dbg(netdev, "Got Statistics Response\n");
                complete(&adapter->stats_done);
                break;
        case QUERY_IP_OFFLOAD_RSP:
                netdev_dbg(netdev, "Got Query IP offload Response\n");
                handle_query_ip_offload_rsp(adapter);
                break;
        case MULTICAST_CTRL_RSP:
                netdev_dbg(netdev, "Got multicast control Response\n");
                break;
        case CONTROL_IP_OFFLOAD_RSP:
                netdev_dbg(netdev, "Got Control IP offload Response\n");
                dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
                                 sizeof(adapter->ip_offload_ctrl),
                                 DMA_TO_DEVICE);
                complete(&adapter->init_done);
                break;
        case COLLECT_FW_TRACE_RSP:
                netdev_dbg(netdev, "Got Collect firmware trace Response\n");
                complete(&adapter->fw_done);
                break;
        case GET_VPD_SIZE_RSP:
                handle_vpd_size_rsp(crq, adapter);
                break;
        case GET_VPD_RSP:
                handle_vpd_rsp(crq, adapter);
                break;
        case QUERY_PHYS_PARMS_RSP:
                adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
                complete(&adapter->fw_done);
                break;
        default:
                netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
                           gen_crq->cmd);
        }
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
        struct ibmvnic_adapter *adapter = instance;

        tasklet_schedule(&adapter->tasklet);
        return IRQ_HANDLED;
}

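/* Drain the CRQ from tasklet context. While capability exchanges are
 * in flight the tasklet keeps polling so that all capability responses
 * are consumed in one pass.
 */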
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
        struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
        struct ibmvnic_crq_queue *queue = &adapter->crq;
        union ibmvnic_crq *crq;
        unsigned long flags;
        bool done = false;

        spin_lock_irqsave(&queue->lock, flags);
        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
                        ibmvnic_handle_crq(crq, adapter);
                        crq->generic.first = 0;
                }

                /* remain in tasklet until all
                 * capabilities responses are received
                 */
                if (!adapter->wait_capability)
                        done = true;
        }
        /* if capabilities CRQ's were sent in this tasklet, the following
         * tasklet must wait until all responses are received
         */
        if (atomic_read(&adapter->running_cap_crqs) != 0)
                adapter->wait_capability = true;
        spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct vio_dev *vdev = adapter->vdev;
        int rc;

        do {
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

        if (rc)
                dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

        return rc;
}

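/* Reset the CRQ by freeing it in the hypervisor, clearing the queue
 * page, and re-registering it with H_REG_CRQ. H_CLOSED here just means
 * the partner adapter has not come back yet.
 */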
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct device *dev = &adapter->vdev->dev;
        struct vio_dev *vdev = adapter->vdev;
        int rc;

        /* Close the CRQ */
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        /* Clean out the queue */
        memset(crq->msgs, 0, PAGE_SIZE);
        crq->cur = 0;
        crq->active = false;

        /* And re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
                                crq->msg_token, PAGE_SIZE);

        if (rc == H_CLOSED)
                /* Adapter is good, but other end is not ready */
                dev_warn(dev, "Partner adapter not ready\n");
        else if (rc != 0)
                dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

        return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct vio_dev *vdev = adapter->vdev;
        long rc;

        if (!crq->msgs)
                return;

        netdev_dbg(adapter->netdev, "Releasing CRQ\n");
        free_irq(vdev->irq, adapter);
        tasklet_kill(&adapter->tasklet);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_page((unsigned long)crq->msgs);
        crq->msgs = NULL;
        crq->active = false;
}

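/* Allocate, DMA-map, and register the single-page CRQ, then hook up
 * the tasklet and the CRQ interrupt. H_RESOURCE from H_REG_CRQ can
 * mean a previous instance (e.g. after kexec) still owns the CRQ, in
 * which case a reset is attempted instead.
 */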
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct device *dev = &adapter->vdev->dev;
        struct vio_dev *vdev = adapter->vdev;
        int rc, retrc = -ENOMEM;

        if (crq->msgs)
                return 0;

        crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
        /* Should we allocate more than one page? */

        if (!crq->msgs)
                return -ENOMEM;

        crq->size = PAGE_SIZE / sizeof(*crq->msgs);
        crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
                                        DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, crq->msg_token))
                goto map_failed;

        rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
                                crq->msg_token, PAGE_SIZE);

        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy. try a reset */
                rc = ibmvnic_reset_crq(adapter);
        retrc = rc;

        if (rc == H_CLOSED) {
                dev_warn(dev, "Partner adapter not ready\n");
        } else if (rc) {
                dev_warn(dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        retrc = 0;

        tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);

        netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
        snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
                 adapter->vdev->unit_address);
        rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
        if (rc) {
                dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
                        vdev->irq, rc);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc) {
                dev_err(dev, "Error %d enabling interrupts\n", rc);
                goto req_irq_failed;
        }

        crq->cur = 0;
        spin_lock_init(&crq->lock);

        return retrc;

req_irq_failed:
        tasklet_kill(&adapter->tasklet);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
        dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
        free_page((unsigned long)crq->msgs);
        crq->msgs = NULL;
        return retrc;
}

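/* Drive the CRQ initialization handshake with the server and (re)build
 * the sub-CRQs. On a reset where the negotiated queue counts changed,
 * the sub-CRQs are released and reallocated; otherwise they are simply
 * reset.
 */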
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
        struct device *dev = &adapter->vdev->dev;
        unsigned long timeout = msecs_to_jiffies(30000);
        u64 old_num_rx_queues, old_num_tx_queues;
        int rc;

        adapter->from_passive_init = false;

        if (reset) {
                old_num_rx_queues = adapter->req_rx_queues;
                old_num_tx_queues = adapter->req_tx_queues;
                reinit_completion(&adapter->init_done);
        }

        adapter->init_done_rc = 0;
        rc = ibmvnic_send_crq_init(adapter);
        if (rc) {
                dev_err(dev, "Send crq init failed with error %d\n", rc);
                return rc;
        }

        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
                dev_err(dev, "Initialization sequence timed out\n");
                return -1;
        }

        if (adapter->init_done_rc) {
                release_crq_queue(adapter);
                return adapter->init_done_rc;
        }

        if (adapter->from_passive_init) {
                adapter->state = VNIC_OPEN;
                adapter->from_passive_init = false;
                return -1;
        }

        if (reset &&
            test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
            adapter->reset_reason != VNIC_RESET_MOBILITY) {
                if (adapter->req_rx_queues != old_num_rx_queues ||
                    adapter->req_tx_queues != old_num_tx_queues) {
                        release_sub_crqs(adapter, 0);
                        rc = init_sub_crqs(adapter);
                } else {
                        rc = reset_sub_crq_queues(adapter);
                }
        } else {
                rc = init_sub_crqs(adapter);
        }

        if (rc) {
                dev_err(dev, "Initialization of sub crqs failed\n");
                release_crq_queue(adapter);
                return rc;
        }

        rc = init_sub_crq_irqs(adapter);
        if (rc) {
                dev_err(dev, "Failed to initialize sub crq irqs\n");
                release_crq_queue(adapter);
        }

        return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
        int rc;

        dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
                dev->unit_address);

        mac_addr_p = (unsigned char *)vio_get_attribute(dev,
                                                        VETH_MAC_ADDR, NULL);
        if (!mac_addr_p) {
                dev_err(&dev->dev,
                        "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
                        __FILE__, __LINE__);
                return 0;
        }

        netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
                                   IBMVNIC_MAX_QUEUES);
        if (!netdev)
                return -ENOMEM;

        adapter = netdev_priv(netdev);
        adapter->state = VNIC_PROBING;
        dev_set_drvdata(&dev->dev, netdev);
        adapter->vdev = dev;
        adapter->netdev = netdev;

        ether_addr_copy(adapter->mac_addr, mac_addr_p);
        ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
        netdev->irq = dev->irq;
        netdev->netdev_ops = &ibmvnic_netdev_ops;
        netdev->ethtool_ops = &ibmvnic_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);

        spin_lock_init(&adapter->stats_lock);

        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
                          __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
        spin_lock_init(&adapter->state_lock);
        mutex_init(&adapter->fw_lock);
        init_completion(&adapter->init_done);
        init_completion(&adapter->fw_done);
        init_completion(&adapter->reset_done);
        init_completion(&adapter->stats_done);
        clear_bit(0, &adapter->resetting);

        do {
                rc = init_crq_queue(adapter);
                if (rc) {
                        dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
                                rc);
                        goto ibmvnic_init_fail;
                }

                rc = ibmvnic_reset_init(adapter, false);
                if (rc && rc != EAGAIN)
                        goto ibmvnic_init_fail;
        } while (rc == EAGAIN);

        rc = init_stats_buffers(adapter);
        if (rc)
                goto ibmvnic_init_fail;

        rc = init_stats_token(adapter);
        if (rc)
                goto ibmvnic_stats_fail;

        netdev->mtu = adapter->req_mtu - ETH_HLEN;
        netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
        netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

        rc = device_create_file(&dev->dev, &dev_attr_failover);
        if (rc)
                goto ibmvnic_dev_file_err;

        netif_carrier_off(netdev);
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
                goto ibmvnic_register_fail;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");

        adapter->state = VNIC_PROBED;

        adapter->wait_for_reset = false;

        return 0;

ibmvnic_register_fail:
        device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
        release_stats_token(adapter);

ibmvnic_stats_fail:
        release_stats_buffers(adapter);

ibmvnic_init_fail:
        release_sub_crqs(adapter, 1);
        release_crq_queue(adapter);
        mutex_destroy(&adapter->fw_lock);
        free_netdev(netdev);

        return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&adapter->state_lock, flags);
        if (adapter->state == VNIC_RESETTING) {
                spin_unlock_irqrestore(&adapter->state_lock, flags);
                return -EBUSY;
        }

        adapter->state = VNIC_REMOVING;
        spin_unlock_irqrestore(&adapter->state_lock, flags);

        flush_work(&adapter->ibmvnic_reset);
        flush_delayed_work(&adapter->ibmvnic_delayed_reset);

        rtnl_lock();
        unregister_netdevice(netdev);

        release_resources(adapter);
        release_sub_crqs(adapter, 1);
        release_crq_queue(adapter);

        release_stats_token(adapter);
        release_stats_buffers(adapter);

        adapter->state = VNIC_REMOVED;

        rtnl_unlock();
        mutex_destroy(&adapter->fw_lock);
        device_remove_file(&dev->dev, &dev_attr_failover);
        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);

        return 0;
}

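/* Writing "1" to the sysfs failover attribute asks the hypervisor, via
 * H_VIOCTL, for the current session token and then signals a session
 * error against it, triggering a client-initiated failover to the
 * backing device.
 */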
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        __be64 session_token;
        long rc;

        if (!sysfs_streq(buf, "1"))
                return -EINVAL;

        rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
                         H_GET_SESSION_TOKEN, 0, 0, 0);
        if (rc) {
                netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
                           rc);
                return -EINVAL;
        }

        session_token = (__be64)retbuf[0];
        netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
                   be64_to_cpu(session_token));
        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_SESSION_ERR_DETECTED, session_token, 0, 0);
        if (rc) {
                netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
                           rc);
                return -EINVAL;
        }

        return count;
}

static DEVICE_ATTR_WO(failover);

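/* Estimate the IO entitlement (DMA space) this device wants: one page
 * for the CRQ, the statistics buffer, four pages per sub-CRQ, and the
 * mapped rx pool buffers. Before probe completes, fall back to the
 * driver default.
 */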
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
        struct net_device *netdev = dev_get_drvdata(&vdev->dev);
        struct ibmvnic_adapter *adapter;
        struct iommu_table *tbl;
        unsigned long ret = 0;
        int i;

        tbl = get_iommu_table_base(&vdev->dev);

        /* netdev inits at probe time along with the structures we need below */
        if (!netdev)
                return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

        adapter = netdev_priv(netdev);

        ret += PAGE_SIZE; /* the crq message queue */
        ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

        for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
                ret += 4 * PAGE_SIZE; /* the scrq message queue */

        for (i = 0; i < adapter->num_active_rx_pools; i++)
                ret += adapter->rx_pool[i].size *
                    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

        return ret;
}

static int ibmvnic_resume(struct device *dev)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        if (adapter->state != VNIC_OPEN)
                return 0;

        tasklet_schedule(&adapter->tasklet);

        return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
        {"network", "IBM,vnic"},
        {"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
        .resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
        .id_table       = ibmvnic_device_table,
        .probe          = ibmvnic_probe,
        .remove         = ibmvnic_remove,
        .get_desired_dma = ibmvnic_get_desired_dma,
        .name           = ibmvnic_driver_name,
        .pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
        pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
                IBMVNIC_DRIVER_VERSION);

        return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
        vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);