// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*									  */
/* IBM System i and System p Virtual NIC Device Driver			  */
/* Copyright (C) 2014 IBM Corp.						  */
/* Santiago Leon (santi_leon@yahoo.com)					  */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)				  */
/* John Allen (jallen@linux.vnet.ibm.com)				  */
/*									  */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN	  */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*									  */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but	  */
/* are used by the driver to notify the server that a packet is	  */
/* ready for transmission or that a buffer has been added to receive a	  */
/* packet. Subsequently, sCRQs are used by the server to notify the	  */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.			  */
/*									  */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in	  */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use	  */
/* "long term mapping". This entails that large, contiguous DMA mapped	  */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.	  */
/*									  */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	retry = 5;
	netdev = adapter->netdev;
	div_timeout = msecs_to_jiffies(timeout / retry);

	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

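/* Allocate a DMA-coherent long term buffer and register it with the
 * VNIC server via a REQUEST_MAP request, waiting for the firmware
 * response before declaring the buffer usable.
 */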
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

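/* Unmap a long term buffer on the server side (skipped for failover and
 * mobility resets, where the server-side state is already gone) and
 * free the DMA-coherent memory.
 */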
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

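/* Zero a long term buffer and re-register it with the VNIC server.
 * Falls back to freeing and reallocating the buffer if the remap fails.
 */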
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

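/* Post fresh receive buffers from @pool to the VNIC server. Each skb is
 * backed by a slot of the pool's long term buffer; descriptors are
 * batched through the indirect sub-CRQ buffer before being handed to
 * the hypervisor.
 */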
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;
	for (i = 0; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * lost.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
		pool->next_free = (pool->next_free + 1) % pool->size;
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

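/* Re-initialize the existing rx pools after a reset, reallocating the
 * long term buffers only when the required buffer size has changed.
 */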
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

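/* Allocate one rx pool per active rx sub-CRQ, including the free map,
 * the buffer tracking array and the backing long term buffer.
 */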
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

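/* Allocate one standard tx pool and one TSO pool per active tx sub-CRQ. */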
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

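/* Send a login request to the VNIC server and wait for the response,
 * retrying on timeout, abort or partial success. A partial success
 * means the server adjusted our capabilities, so the sub-CRQs are
 * renegotiated before logging in again.
 */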
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

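/* Query the size of the device's Vital Product Data, then fetch the
 * VPD itself into a DMA-mapped buffer via GET_VPD_SIZE/GET_VPD CRQs.
 */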
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

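/* Bring the interface up: replenish the rx pools, enable NAPI and the
 * sub-CRQ interrupts, and set the logical link state to up.
 */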
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: socket buffer
 * @indir_arr: descriptor array to fill, starting at the second entry
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

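/* Undo any tx descriptors still sitting in the indirect buffer after a
 * failed flush: return their slots to the tx pool free map, drop the
 * skbs and adjust the tx statistics accordingly.
 */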
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}

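/* Flush the batched tx descriptors in the indirect buffer to the
 * hypervisor via send_subcrq_indirect(), cleaning up on failure.
 */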
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return 0;
}

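/* Main transmit path. The skb (including any frags) is copied into a
 * slot of the queue's long term buffer, a tx descriptor (plus optional
 * header descriptors for LSO) is built, and the descriptors are queued
 * in the indirect buffer, which is flushed when full or when the stack
 * stops batching (xmit_more).
 */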
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (adapter->state != VNIC_PROBED) {
		ether_addr_copy(adapter->mac_addr, addr->sa_data);
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
	}

	return rc;
}

/*
 * do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_rwi *rwi,
				 u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			goto out;
	}

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);

	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, true);
	if (rc) {
		rc = IBMVNIC_INIT_FAILED;
		goto out;
	}

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

out:
	if (rc)
		adapter->state = reset_state;
	return rc;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	rtnl_lock();
	/*
	 * Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		adapter->state = VNIC_CLOSING;

		/* Release the RTNL lock before link state change and
		 * re-acquire after the link state change to allow
		 * linkwatch_event to grab the RTNL lock and run during
		 * a reset.
		 */
		rtnl_unlock();
		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
		rtnl_lock();
		if (rc)
			goto out;

		if (adapter->state != VNIC_CLOSING) {
			rc = -1;
			goto out;
		}

		adapter->state = VNIC_CLOSED;
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
		call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
	}

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	rtnl_unlock();

	return rc;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);

out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	return rc;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

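/* Worker that drains the reset work-item list, picking the appropriate
 * reset flavor (change-param, hard or soft) for each item and waiting
 * out a settling delay when a hard reset fails.
 */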
2267 static void __ibmvnic_reset(struct work_struct *work)
2269 struct ibmvnic_rwi *rwi;
2270 struct ibmvnic_adapter *adapter;
2271 bool saved_state = false;
2272 unsigned long flags;
2276 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2278 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2279 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2280 IBMVNIC_RESET_DELAY);
2284 rwi = get_next_rwi(adapter);
2286 spin_lock_irqsave(&adapter->state_lock, flags);
2288 if (adapter->state == VNIC_REMOVING ||
2289 adapter->state == VNIC_REMOVED) {
2290 spin_unlock_irqrestore(&adapter->state_lock, flags);
2297 reset_state = adapter->state;
2300 spin_unlock_irqrestore(&adapter->state_lock, flags);
2302 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2303 /* CHANGE_PARAM requestor holds rtnl_lock */
2304 rc = do_change_param_reset(adapter, rwi, reset_state);
2305 } else if (adapter->force_reset_recovery) {
2307 * Since we are doing a hard reset now, clear the
2308 * failover_pending flag so we don't ignore any
2309 * future MOBILITY or other resets.
2311 adapter->failover_pending = false;
2313 /* Transport event occurred during previous reset */
2314 if (adapter->wait_for_reset) {
2315 /* Previous was CHANGE_PARAM; caller locked */
2316 adapter->force_reset_recovery = false;
2317 rc = do_hard_reset(adapter, rwi, reset_state);
2320 adapter->force_reset_recovery = false;
2321 rc = do_hard_reset(adapter, rwi, reset_state);
2325 /* give backing device time to settle down */
2326 netdev_dbg(adapter->netdev,
2327 "[S:%d] Hard reset failed, waiting 60 secs\n",
2329 set_current_state(TASK_UNINTERRUPTIBLE);
2330 schedule_timeout(60 * HZ);
2332 } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2333 adapter->from_passive_init)) {
2334 rc = do_reset(adapter, rwi, reset_state);
2337 adapter->last_reset_time = jiffies;
2340 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2342 rwi = get_next_rwi(adapter);
2344 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2345 rwi->reset_reason == VNIC_RESET_MOBILITY))
2346 adapter->force_reset_recovery = true;
2347 }
2349 if (adapter->wait_for_reset) {
2350 adapter->reset_done_rc = rc;
2351 complete(&adapter->reset_done);
2352 }
2354 clear_bit_unlock(0, &adapter->resetting);
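/* Bit 0 of adapter->resetting is the worker's lock: it was taken with
 * test_and_set_bit_lock() on entry, so releasing it here lets either a
 * newly scheduled work item or the delayed-reset worker run next.
 */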
2357 static void __ibmvnic_delayed_reset(struct work_struct *work)
2359 struct ibmvnic_adapter *adapter;
2361 adapter = container_of(work, struct ibmvnic_adapter,
2362 ibmvnic_delayed_reset.work);
2363 __ibmvnic_reset(&adapter->ibmvnic_reset);
2366 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2367 enum ibmvnic_reset_reason reason)
2369 struct list_head *entry, *tmp_entry;
2370 struct ibmvnic_rwi *rwi, *tmp;
2371 struct net_device *netdev = adapter->netdev;
2372 unsigned long flags;
2373 int ret;
2375 /*
2376 * If failover is pending don't schedule any other reset.
2377 * Instead let the failover complete. If there is already a
2378 * a failover reset scheduled, we will detect and drop the
2379 * duplicate reset when walking the ->rwi_list below.
2381 if (adapter->state == VNIC_REMOVING ||
2382 adapter->state == VNIC_REMOVED ||
2383 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2384 ret = EBUSY;
2385 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2386 goto err;
2387 }
2389 if (adapter->state == VNIC_PROBING) {
2390 netdev_warn(netdev, "Adapter reset during probe\n");
2391 ret = adapter->init_done_rc = EAGAIN;
2392 goto err;
2393 }
2395 spin_lock_irqsave(&adapter->rwi_lock, flags);
2397 list_for_each(entry, &adapter->rwi_list) {
2398 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2399 if (tmp->reset_reason == reason) {
2400 netdev_dbg(netdev, "Skipping matching reset\n");
2401 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2402 ret = EBUSY;
2403 goto err;
2404 }
2405 }
2407 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2408 if (!rwi) {
2409 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2410 ibmvnic_close(netdev);
2411 ret = ENOMEM;
2412 goto err;
2413 }
2414 /* if we just received a transport event,
2415 * flush reset queue and process this reset
2416 */
2417 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2418 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2419 list_del(entry);
2421 rwi->reset_reason = reason;
2422 list_add_tail(&rwi->list, &adapter->rwi_list);
2423 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2424 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2425 schedule_work(&adapter->ibmvnic_reset);
2427 return 0;
2429 err:
2430 return -ret;
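/* Note that the paths above store positive errno-style codes (EBUSY,
 * EAGAIN, ENOMEM) in ret and the single error exit negates them, so
 * callers always see a conventional negative errno.
 */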
2432 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2434 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2436 if (test_bit(0, &adapter->resetting)) {
2437 netdev_err(adapter->netdev,
2438 "Adapter is resetting, skip timeout reset\n");
2441 /* No queuing up reset until at least 5 seconds (default watchdog val)
2442 * after last reset
2443 */
2444 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2445 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2446 return;
2447 }
2448 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2451 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2452 struct ibmvnic_rx_buff *rx_buff)
2454 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2456 rx_buff->skb = NULL;
2458 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2459 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2461 atomic_dec(&pool->available);
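/* free_map is a ring of buffer indices: freed slots are written at
 * next_alloc (above) and consumed again by the replenish path, while
 * ->available counts buffers currently posted for receive.
 */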
2464 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2466 struct ibmvnic_sub_crq_queue *rx_scrq;
2467 struct ibmvnic_adapter *adapter;
2468 struct net_device *netdev;
2469 int frames_processed;
2470 int scrq_num;
2472 netdev = napi->dev;
2473 adapter = netdev_priv(netdev);
2474 scrq_num = (int)(napi - adapter->napi);
2475 frames_processed = 0;
2476 rx_scrq = adapter->rx_scrq[scrq_num];
2478 restart_poll:
2479 while (frames_processed < budget) {
2480 struct sk_buff *skb;
2481 struct ibmvnic_rx_buff *rx_buff;
2482 union sub_crq *next;
2483 int length;
2484 int offset;
2485 u8 flags = 0;
2487 if (unlikely(test_bit(0, &adapter->resetting) &&
2488 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2489 enable_scrq_irq(adapter, rx_scrq);
2490 napi_complete_done(napi, frames_processed);
2491 return frames_processed;
2494 if (!pending_scrq(adapter, rx_scrq))
2495 break;
2496 /* The queue entry at the current index is peeked at above
2497 * to determine that there is a valid descriptor awaiting
2498 * processing. We want to be sure that the current slot
2499 * holds a valid descriptor before reading its contents.
2500 */
2501 dma_rmb();
2502 next = ibmvnic_next_scrq(adapter, rx_scrq);
2503 rx_buff =
2504 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2505 rx_comp.correlator);
2506 /* do error checking */
2507 if (next->rx_comp.rc) {
2508 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2509 be16_to_cpu(next->rx_comp.rc));
2510 /* free the entry */
2511 next->rx_comp.first = 0;
2512 dev_kfree_skb_any(rx_buff->skb);
2513 remove_buff_from_pool(adapter, rx_buff);
2514 continue;
2515 } else if (!rx_buff->skb) {
2516 /* free the entry */
2517 next->rx_comp.first = 0;
2518 remove_buff_from_pool(adapter, rx_buff);
2519 continue;
2520 }
2522 length = be32_to_cpu(next->rx_comp.len);
2523 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2524 flags = next->rx_comp.flags;
2525 skb = rx_buff->skb;
2526 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2527 length);
2529 /* VLAN Header has been stripped by the system firmware and
2530 * needs to be inserted by the driver
2531 */
2532 if (adapter->rx_vlan_header_insertion &&
2533 (flags & IBMVNIC_VLAN_STRIPPED))
2534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2535 ntohs(next->rx_comp.vlan_tci));
2537 /* free the entry */
2538 next->rx_comp.first = 0;
2539 remove_buff_from_pool(adapter, rx_buff);
2541 skb_put(skb, length);
2542 skb->protocol = eth_type_trans(skb, netdev);
2543 skb_record_rx_queue(skb, scrq_num);
2545 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2546 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2547 skb->ip_summed = CHECKSUM_UNNECESSARY;
2548 }
2551 napi_gro_receive(napi, skb); /* send it up */
2552 netdev->stats.rx_packets++;
2553 netdev->stats.rx_bytes += length;
2554 adapter->rx_stats_buffers[scrq_num].packets++;
2555 adapter->rx_stats_buffers[scrq_num].bytes += length;
2556 frames_processed++;
2557 }
2559 if (adapter->state != VNIC_CLOSING &&
2560 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2561 adapter->req_rx_add_entries_per_subcrq / 2) ||
2562 frames_processed < budget))
2563 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2564 if (frames_processed < budget) {
2565 if (napi_complete_done(napi, frames_processed)) {
2566 enable_scrq_irq(adapter, rx_scrq);
2567 if (pending_scrq(adapter, rx_scrq)) {
2569 if (napi_reschedule(napi)) {
2570 disable_scrq_irq(adapter, rx_scrq);
2571 goto restart_poll;
2572 }
2573 }
2574 }
2575 }
2576 return frames_processed;
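/* wait_for_reset() snapshots the currently granted MTU, queue and ring
 * values as a fallback, requests a CHANGE_PARAM reset, and on failure
 * restores the fallback values and retries the reset once.
 */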
2579 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2580 {
2581 int rc, ret;
2583 adapter->fallback.mtu = adapter->req_mtu;
2584 adapter->fallback.rx_queues = adapter->req_rx_queues;
2585 adapter->fallback.tx_queues = adapter->req_tx_queues;
2586 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2587 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2589 reinit_completion(&adapter->reset_done);
2590 adapter->wait_for_reset = true;
2591 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2592 if (rc) {
2593 ret = rc;
2594 goto out;
2595 }
2597 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2598 if (rc) {
2599 ret = -ENODEV;
2600 goto out;
2601 }
2603 ret = 0;
2604 if (adapter->reset_done_rc) {
2605 ret = -EIO;
2606 adapter->desired.mtu = adapter->fallback.mtu;
2607 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2608 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2609 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2610 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2612 reinit_completion(&adapter->reset_done);
2613 adapter->wait_for_reset = true;
2614 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2615 if (rc) {
2616 ret = rc;
2617 goto out;
2618 }
2619 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2620 60000);
2621 if (rc)
2622 ret = -ENODEV;
2623 }
2625 out:
2627 adapter->wait_for_reset = false;
2629 return ret;
2632 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2634 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2636 adapter->desired.mtu = new_mtu + ETH_HLEN;
2638 return wait_for_reset(adapter);
2641 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2642 struct net_device *dev,
2643 netdev_features_t features)
2645 /* Some backing hardware adapters can not
2646 * handle packets with a MSS less than 224
2647 * or with only one segment.
2648 */
2649 if (skb_is_gso(skb)) {
2650 if (skb_shinfo(skb)->gso_size < 224 ||
2651 skb_shinfo(skb)->gso_segs == 1)
2652 features &= ~NETIF_F_GSO_MASK;
2653 }
2655 return features;
2658 static const struct net_device_ops ibmvnic_netdev_ops = {
2659 .ndo_open = ibmvnic_open,
2660 .ndo_stop = ibmvnic_close,
2661 .ndo_start_xmit = ibmvnic_xmit,
2662 .ndo_set_rx_mode = ibmvnic_set_multi,
2663 .ndo_set_mac_address = ibmvnic_set_mac,
2664 .ndo_validate_addr = eth_validate_addr,
2665 .ndo_tx_timeout = ibmvnic_tx_timeout,
2666 .ndo_change_mtu = ibmvnic_change_mtu,
2667 .ndo_features_check = ibmvnic_features_check,
2670 /* ethtool functions */
2672 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2673 struct ethtool_link_ksettings *cmd)
2675 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2676 int rc;
2678 rc = send_query_phys_parms(adapter);
2679 if (rc) {
2680 adapter->speed = SPEED_UNKNOWN;
2681 adapter->duplex = DUPLEX_UNKNOWN;
2682 }
2683 cmd->base.speed = adapter->speed;
2684 cmd->base.duplex = adapter->duplex;
2685 cmd->base.port = PORT_FIBRE;
2686 cmd->base.phy_address = 0;
2687 cmd->base.autoneg = AUTONEG_ENABLE;
2689 return 0;
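/* The link is virtual, so port type, PHY address and autoneg are
 * reported as fixed values; only speed and duplex are refreshed from
 * the server via send_query_phys_parms() above.
 */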
2692 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2693 struct ethtool_drvinfo *info)
2695 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2697 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2698 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2699 strlcpy(info->fw_version, adapter->fw_version,
2700 sizeof(info->fw_version));
2703 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2705 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2707 return adapter->msg_enable;
2710 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2712 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2714 adapter->msg_enable = data;
2717 static u32 ibmvnic_get_link(struct net_device *netdev)
2719 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2721 /* Don't need to send a query because we request a logical link up at
2722 * init and then we wait for link state indications
2723 */
2724 return adapter->logical_link_state;
2727 static void ibmvnic_get_ringparam(struct net_device *netdev,
2728 struct ethtool_ringparam *ring)
2730 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2732 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2733 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2734 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2735 } else {
2736 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2737 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2738 }
2739 ring->rx_mini_max_pending = 0;
2740 ring->rx_jumbo_max_pending = 0;
2741 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2742 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2743 ring->rx_mini_pending = 0;
2744 ring->rx_jumbo_pending = 0;
2747 static int ibmvnic_set_ringparam(struct net_device *netdev,
2748 struct ethtool_ringparam *ring)
2750 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2751 int ret;
2754 adapter->desired.rx_entries = ring->rx_pending;
2755 adapter->desired.tx_entries = ring->tx_pending;
2757 ret = wait_for_reset(adapter);
2759 if (!ret &&
2760 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2761 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2762 netdev_info(netdev,
2763 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2764 ring->rx_pending, ring->tx_pending,
2765 adapter->req_rx_add_entries_per_subcrq,
2766 adapter->req_tx_entries_per_subcrq);
2767 return ret;
2770 static void ibmvnic_get_channels(struct net_device *netdev,
2771 struct ethtool_channels *channels)
2773 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2775 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2776 channels->max_rx = adapter->max_rx_queues;
2777 channels->max_tx = adapter->max_tx_queues;
2778 } else {
2779 channels->max_rx = IBMVNIC_MAX_QUEUES;
2780 channels->max_tx = IBMVNIC_MAX_QUEUES;
2781 }
2783 channels->max_other = 0;
2784 channels->max_combined = 0;
2785 channels->rx_count = adapter->req_rx_queues;
2786 channels->tx_count = adapter->req_tx_queues;
2787 channels->other_count = 0;
2788 channels->combined_count = 0;
2791 static int ibmvnic_set_channels(struct net_device *netdev,
2792 struct ethtool_channels *channels)
2794 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2795 int ret;
2798 adapter->desired.rx_queues = channels->rx_count;
2799 adapter->desired.tx_queues = channels->tx_count;
2801 ret = wait_for_reset(adapter);
2803 if (!ret &&
2804 (adapter->req_rx_queues != channels->rx_count ||
2805 adapter->req_tx_queues != channels->tx_count))
2806 netdev_info(netdev,
2807 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2808 channels->rx_count, channels->tx_count,
2809 adapter->req_rx_queues, adapter->req_tx_queues);
2810 return ret;
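/* Ring and channel updates both funnel through wait_for_reset(); a
 * server that grants fewer resources than requested is reported with
 * netdev_info() above rather than treated as an error.
 */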
2814 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2816 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2819 switch (stringset) {
2820 case ETH_SS_STATS:
2821 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2822 i++, data += ETH_GSTRING_LEN)
2823 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2825 for (i = 0; i < adapter->req_tx_queues; i++) {
2826 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2827 data += ETH_GSTRING_LEN;
2829 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2830 data += ETH_GSTRING_LEN;
2832 snprintf(data, ETH_GSTRING_LEN,
2833 "tx%d_dropped_packets", i);
2834 data += ETH_GSTRING_LEN;
2837 for (i = 0; i < adapter->req_rx_queues; i++) {
2838 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2839 data += ETH_GSTRING_LEN;
2841 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2842 data += ETH_GSTRING_LEN;
2844 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2845 data += ETH_GSTRING_LEN;
2846 }
2847 break;
2849 case ETH_SS_PRIV_FLAGS:
2850 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2851 strcpy(data + i * ETH_GSTRING_LEN,
2852 ibmvnic_priv_flags[i]);
2853 break;
2854 }
2855 }
2859 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2861 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2863 switch (sset) {
2864 case ETH_SS_STATS:
2865 return ARRAY_SIZE(ibmvnic_stats) +
2866 adapter->req_tx_queues * NUM_TX_STATS +
2867 adapter->req_rx_queues * NUM_RX_STATS;
2868 case ETH_SS_PRIV_FLAGS:
2869 return ARRAY_SIZE(ibmvnic_priv_flags);
2870 default:
2871 return -EOPNOTSUPP;
2872 }
2875 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2876 struct ethtool_stats *stats, u64 *data)
2878 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2879 union ibmvnic_crq crq;
2880 int i, j;
2881 int rc;
2883 memset(&crq, 0, sizeof(crq));
2884 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2885 crq.request_statistics.cmd = REQUEST_STATISTICS;
2886 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2887 crq.request_statistics.len =
2888 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2890 /* Wait for data to be written */
2891 reinit_completion(&adapter->stats_done);
2892 rc = ibmvnic_send_crq(adapter, &crq);
2893 if (rc)
2894 return;
2895 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2896 if (rc)
2897 return;
2899 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2900 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2901 ibmvnic_stats[i].offset));
2903 for (j = 0; j < adapter->req_tx_queues; j++) {
2904 data[i] = adapter->tx_stats_buffers[j].packets;
2905 i++;
2906 data[i] = adapter->tx_stats_buffers[j].bytes;
2907 i++;
2908 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2909 i++;
2910 }
2912 for (j = 0; j < adapter->req_rx_queues; j++) {
2913 data[i] = adapter->rx_stats_buffers[j].packets;
2914 i++;
2915 data[i] = adapter->rx_stats_buffers[j].bytes;
2916 i++;
2917 data[i] = adapter->rx_stats_buffers[j].interrupts;
2918 i++;
2919 }
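/* The fill order above must stay in lockstep with
 * ibmvnic_get_strings(): global firmware statistics first, then the
 * per-TX-queue counters, then the per-RX-queue counters.
 */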
2922 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2924 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2926 return adapter->priv_flags;
2929 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2931 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2932 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2934 if (which_maxes)
2935 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2936 else
2937 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2939 return 0;
2941 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2942 .get_drvinfo = ibmvnic_get_drvinfo,
2943 .get_msglevel = ibmvnic_get_msglevel,
2944 .set_msglevel = ibmvnic_set_msglevel,
2945 .get_link = ibmvnic_get_link,
2946 .get_ringparam = ibmvnic_get_ringparam,
2947 .set_ringparam = ibmvnic_set_ringparam,
2948 .get_channels = ibmvnic_get_channels,
2949 .set_channels = ibmvnic_set_channels,
2950 .get_strings = ibmvnic_get_strings,
2951 .get_sset_count = ibmvnic_get_sset_count,
2952 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2953 .get_link_ksettings = ibmvnic_get_link_ksettings,
2954 .get_priv_flags = ibmvnic_get_priv_flags,
2955 .set_priv_flags = ibmvnic_set_priv_flags,
2956 };
2958 /* Routines for managing CRQs/sCRQs */
2960 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2961 struct ibmvnic_sub_crq_queue *scrq)
2962 {
2963 int rc;
2965 if (!scrq) {
2966 netdev_dbg(adapter->netdev,
2967 "Invalid scrq reset. irq (%d) or msgs (%p).\n",
2968 scrq->irq, scrq->msgs);
2969 return -EINVAL;
2970 }
2972 if (scrq->irq) {
2973 free_irq(scrq->irq, scrq);
2974 irq_dispose_mapping(scrq->irq);
2975 scrq->irq = 0;
2976 }
2978 if (scrq->msgs) {
2979 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2980 atomic_set(&scrq->used, 0);
2981 scrq->cur = 0;
2982 scrq->ind_buf.index = 0;
2983 } else {
2984 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2985 return -EINVAL;
2986 }
2988 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2989 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2991 return rc;
2993 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2994 {
2995 int rc, i;
2997 if (!adapter->tx_scrq || !adapter->rx_scrq)
2998 return -EINVAL;
3000 for (i = 0; i < adapter->req_tx_queues; i++) {
3001 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3002 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3003 if (rc)
3004 return rc;
3005 }
3007 for (i = 0; i < adapter->req_rx_queues; i++) {
3008 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3009 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3010 if (rc)
3011 return rc;
3012 }
3014 return rc;
3017 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3018 struct ibmvnic_sub_crq_queue *scrq,
3021 struct device *dev = &adapter->vdev->dev;
3022 long rc;
3024 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3026 if (do_h_free) {
3027 /* Close the sub-crqs */
3028 do {
3029 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3030 adapter->vdev->unit_address,
3031 scrq->crq_num);
3032 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3034 if (rc) {
3035 netdev_err(adapter->netdev,
3036 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3037 scrq->crq_num, rc);
3038 }
3039 }
3041 dma_free_coherent(dev,
3042 IBMVNIC_IND_ARR_SZ,
3043 scrq->ind_buf.indir_arr,
3044 scrq->ind_buf.indir_dma);
3046 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3047 DMA_BIDIRECTIONAL);
3048 free_pages((unsigned long)scrq->msgs, 2);
3049 kfree(scrq);
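/* The sub-CRQ message area is an order-2 page allocation (four pages),
 * which is why registration, DMA mapping, reset and free all operate
 * on 4 * PAGE_SIZE.
 */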
3052 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3055 struct device *dev = &adapter->vdev->dev;
3056 struct ibmvnic_sub_crq_queue *scrq;
3059 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3060 if (!scrq)
3061 return NULL;
3063 scrq->msgs =
3064 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3065 if (!scrq->msgs) {
3066 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3067 goto zero_page_failed;
3070 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3071 DMA_BIDIRECTIONAL);
3072 if (dma_mapping_error(dev, scrq->msg_token)) {
3073 dev_warn(dev, "Couldn't map crq queue messages page\n");
3074 goto map_failed;
3075 }
3077 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3078 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3080 if (rc == H_RESOURCE)
3081 rc = ibmvnic_reset_crq(adapter);
3083 if (rc == H_CLOSED) {
3084 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3085 } else if (rc) {
3086 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3087 goto reg_failed;
3088 }
3090 scrq->adapter = adapter;
3091 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3092 scrq->ind_buf.index = 0;
3094 scrq->ind_buf.indir_arr =
3095 dma_alloc_coherent(dev,
3096 IBMVNIC_IND_ARR_SZ,
3097 &scrq->ind_buf.indir_dma,
3098 GFP_KERNEL);
3100 if (!scrq->ind_buf.indir_arr)
3101 goto indir_failed;
3103 spin_lock_init(&scrq->lock);
3105 netdev_dbg(adapter->netdev,
3106 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3107 scrq->crq_num, scrq->hw_irq, scrq->irq);
3109 return scrq;
3111 indir_failed:
3112 do {
3113 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3114 adapter->vdev->unit_address,
3115 scrq->crq_num);
3116 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3117 reg_failed:
3118 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3119 DMA_BIDIRECTIONAL);
3120 map_failed:
3121 free_pages((unsigned long)scrq->msgs, 2);
3122 zero_page_failed:
3123 kfree(scrq);
3125 return NULL;
3128 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3129 {
3130 int i;
3132 if (adapter->tx_scrq) {
3133 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3134 if (!adapter->tx_scrq[i])
3135 continue;
3137 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3138 i);
3139 if (adapter->tx_scrq[i]->irq) {
3140 free_irq(adapter->tx_scrq[i]->irq,
3141 adapter->tx_scrq[i]);
3142 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3143 adapter->tx_scrq[i]->irq = 0;
3146 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3147 do_h_free);
3148 }
3150 kfree(adapter->tx_scrq);
3151 adapter->tx_scrq = NULL;
3152 adapter->num_active_tx_scrqs = 0;
3153 }
3155 if (adapter->rx_scrq) {
3156 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3157 if (!adapter->rx_scrq[i])
3158 continue;
3160 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3161 i);
3162 if (adapter->rx_scrq[i]->irq) {
3163 free_irq(adapter->rx_scrq[i]->irq,
3164 adapter->rx_scrq[i]);
3165 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3166 adapter->rx_scrq[i]->irq = 0;
3169 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3170 do_h_free);
3171 }
3173 kfree(adapter->rx_scrq);
3174 adapter->rx_scrq = NULL;
3175 adapter->num_active_rx_scrqs = 0;
3176 }
3177 }
3179 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3180 struct ibmvnic_sub_crq_queue *scrq)
3182 struct device *dev = &adapter->vdev->dev;
3183 unsigned long rc;
3185 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3186 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3187 if (rc)
3188 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3189 scrq->hw_irq, rc);
3190 return rc;
3193 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3194 struct ibmvnic_sub_crq_queue *scrq)
3196 struct device *dev = &adapter->vdev->dev;
3197 unsigned long rc;
3199 if (scrq->hw_irq > 0x100000000ULL) {
3200 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3201 return 1;
3202 }
3204 if (test_bit(0, &adapter->resetting) &&
3205 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3206 u64 val = (0xff000000) | scrq->hw_irq;
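/* The value follows the XICS XIRR layout: priority byte 0xff in the
 * top bits, interrupt source number in the low bits, as H_EOI expects;
 * under XIVE the hcall instead returns H_FUNCTION, handled below.
 */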
3208 rc = plpar_hcall_norets(H_EOI, val);
3209 /* H_EOI would fail with rc = H_FUNCTION when running
3210 * in XIVE mode which is expected, but not an error.
3212 if (rc && (rc != H_FUNCTION))
3213 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3214 val, rc);
3215 }
3217 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3218 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3219 if (rc)
3220 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3221 scrq->hw_irq, rc);
3222 return rc;
3225 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3226 struct ibmvnic_sub_crq_queue *scrq)
3228 struct device *dev = &adapter->vdev->dev;
3229 struct ibmvnic_tx_pool *tx_pool;
3230 struct ibmvnic_tx_buff *txbuff;
3231 struct netdev_queue *txq;
3232 union sub_crq *next;
3233 int index;
3234 int i;
3236 restart_loop:
3237 while (pending_scrq(adapter, scrq)) {
3238 unsigned int pool = scrq->pool_index;
3239 int num_entries = 0;
3240 int total_bytes = 0;
3241 int num_packets = 0;
3243 /* The queue entry at the current index is peeked at above
3244 * to determine that there is a valid descriptor awaiting
3245 * processing. We want to be sure that the current slot
3246 * holds a valid descriptor before reading its contents.
3247 */
3249 dma_rmb();
3250 next = ibmvnic_next_scrq(adapter, scrq);
3251 for (i = 0; i < next->tx_comp.num_comps; i++) {
3252 if (next->tx_comp.rcs[i])
3253 dev_err(dev, "tx error %x\n",
3254 next->tx_comp.rcs[i]);
3255 index = be32_to_cpu(next->tx_comp.correlators[i]);
3256 if (index & IBMVNIC_TSO_POOL_MASK) {
3257 tx_pool = &adapter->tso_pool[pool];
3258 index &= ~IBMVNIC_TSO_POOL_MASK;
3260 tx_pool = &adapter->tx_pool[pool];
3263 txbuff = &tx_pool->tx_buff[index];
3264 num_packets++;
3265 num_entries += txbuff->num_entries;
3266 if (txbuff->skb) {
3267 total_bytes += txbuff->skb->len;
3268 dev_consume_skb_irq(txbuff->skb);
3269 txbuff->skb = NULL;
3270 } else {
3271 netdev_warn(adapter->netdev,
3272 "TX completion received with NULL socket buffer\n");
3273 }
3274 tx_pool->free_map[tx_pool->producer_index] = index;
3275 tx_pool->producer_index =
3276 (tx_pool->producer_index + 1) %
3277 tx_pool->num_buffers;
3279 /* remove tx_comp scrq*/
3280 next->tx_comp.first = 0;
3281 }
3282 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3283 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3285 if (atomic_sub_return(num_entries, &scrq->used) <=
3286 (adapter->req_tx_entries_per_subcrq / 2) &&
3287 __netif_subqueue_stopped(adapter->netdev,
3288 scrq->pool_index)) {
3289 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3290 netdev_dbg(adapter->netdev, "Started queue %d\n",
3291 scrq->pool_index);
3292 }
3293 }
3295 enable_scrq_irq(adapter, scrq);
3297 if (pending_scrq(adapter, scrq)) {
3298 disable_scrq_irq(adapter, scrq);
3299 goto restart_loop;
3300 }
3302 return 0;
3305 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3307 struct ibmvnic_sub_crq_queue *scrq = instance;
3308 struct ibmvnic_adapter *adapter = scrq->adapter;
3310 disable_scrq_irq(adapter, scrq);
3311 ibmvnic_complete_tx(adapter, scrq);
3313 return IRQ_HANDLED;
3316 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3318 struct ibmvnic_sub_crq_queue *scrq = instance;
3319 struct ibmvnic_adapter *adapter = scrq->adapter;
3321 /* When booting a kdump kernel we can hit pending interrupts
3322 * prior to completing driver initialization.
3323 */
3324 if (unlikely(adapter->state != VNIC_OPEN))
3325 return IRQ_NONE;
3327 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3329 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3330 disable_scrq_irq(adapter, scrq);
3331 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3332 }
3334 return IRQ_HANDLED;
3337 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3339 struct device *dev = &adapter->vdev->dev;
3340 struct ibmvnic_sub_crq_queue *scrq;
3344 for (i = 0; i < adapter->req_tx_queues; i++) {
3345 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3347 scrq = adapter->tx_scrq[i];
3348 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3350 if (!scrq->irq) {
3351 rc = -EINVAL;
3352 dev_err(dev, "Error mapping irq\n");
3353 goto req_tx_irq_failed;
3354 }
3356 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3357 adapter->vdev->unit_address, i);
3358 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3359 0, scrq->name, scrq);
3362 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3363 scrq->irq, rc);
3364 irq_dispose_mapping(scrq->irq);
3365 goto req_tx_irq_failed;
3369 for (i = 0; i < adapter->req_rx_queues; i++) {
3370 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3372 scrq = adapter->rx_scrq[i];
3373 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3374 if (!scrq->irq) {
3375 rc = -EINVAL;
3376 dev_err(dev, "Error mapping irq\n");
3377 goto req_rx_irq_failed;
3378 }
3379 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3380 adapter->vdev->unit_address, i);
3381 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3382 0, scrq->name, scrq);
3384 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3385 scrq->irq, rc);
3386 irq_dispose_mapping(scrq->irq);
3387 goto req_rx_irq_failed;
3388 }
3389 }
3391 return 0;
3392 req_rx_irq_failed:
3393 for (j = 0; j < i; j++) {
3394 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3395 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3396 }
3397 i = adapter->req_tx_queues;
3398 req_tx_irq_failed:
3399 for (j = 0; j < i; j++) {
3400 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3401 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3402 }
3403 release_sub_crqs(adapter, 1);
3404 return rc;
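/* Unwind order matters here: a failure on an rx irq frees the rx irqs
 * registered so far, then falls through with i reset to req_tx_queues
 * so every tx irq is freed too before the sub-CRQs are released.
 */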
3407 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3409 struct device *dev = &adapter->vdev->dev;
3410 struct ibmvnic_sub_crq_queue **allqueues;
3411 int registered_queues = 0;
3412 int total_queues;
3413 int more = 0;
3414 int i;
3416 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3418 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3419 if (!allqueues)
3420 return -1;
3422 for (i = 0; i < total_queues; i++) {
3423 allqueues[i] = init_sub_crq_queue(adapter);
3424 if (!allqueues[i]) {
3425 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3426 break;
3427 }
3428 registered_queues++;
3431 /* Make sure we were able to register the minimum number of queues */
3432 if (registered_queues <
3433 adapter->min_tx_queues + adapter->min_rx_queues) {
3434 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3435 goto tx_failed;
3436 }
3438 /* Distribute the failed allocated queues*/
3439 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3440 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3441 switch (i % 3) {
3442 case 0:
3443 if (adapter->req_rx_queues > adapter->min_rx_queues)
3444 adapter->req_rx_queues--;
3445 else
3446 more++;
3447 break;
3448 case 1:
3449 if (adapter->req_tx_queues > adapter->min_tx_queues)
3450 adapter->req_tx_queues--;
3451 else
3452 more++;
3453 break;
3454 }
3455 }
3457 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3458 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3459 if (!adapter->tx_scrq)
3460 goto tx_failed;
3462 for (i = 0; i < adapter->req_tx_queues; i++) {
3463 adapter->tx_scrq[i] = allqueues[i];
3464 adapter->tx_scrq[i]->pool_index = i;
3465 adapter->num_active_tx_scrqs++;
3468 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3469 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3470 if (!adapter->rx_scrq)
3471 goto rx_failed;
3473 for (i = 0; i < adapter->req_rx_queues; i++) {
3474 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3475 adapter->rx_scrq[i]->scrq_num = i;
3476 adapter->num_active_rx_scrqs++;
3477 }
3479 kfree(allqueues);
3480 return 0;
3482 rx_failed:
3483 kfree(adapter->tx_scrq);
3484 adapter->tx_scrq = NULL;
3485 tx_failed:
3486 for (i = 0; i < registered_queues; i++)
3487 release_sub_crq_queue(adapter, allqueues[i], 1);
3488 kfree(allqueues);
3489 return -1;
3492 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3494 struct device *dev = &adapter->vdev->dev;
3495 union ibmvnic_crq crq;
3496 int max_entries;
3498 if (!retry) {
3499 /* Sub-CRQ entries are 32 byte long */
3500 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3502 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3503 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3504 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3505 return;
3506 }
3508 if (adapter->desired.mtu)
3509 adapter->req_mtu = adapter->desired.mtu;
3510 else
3511 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3513 if (!adapter->desired.tx_entries)
3514 adapter->desired.tx_entries =
3515 adapter->max_tx_entries_per_subcrq;
3516 if (!adapter->desired.rx_entries)
3517 adapter->desired.rx_entries =
3518 adapter->max_rx_add_entries_per_subcrq;
3520 max_entries = IBMVNIC_MAX_LTB_SIZE /
3521 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3523 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3524 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3525 adapter->desired.tx_entries = max_entries;
3528 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3529 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3530 adapter->desired.rx_entries = max_entries;
3533 if (adapter->desired.tx_entries)
3534 adapter->req_tx_entries_per_subcrq =
3535 adapter->desired.tx_entries;
3536 else
3537 adapter->req_tx_entries_per_subcrq =
3538 adapter->max_tx_entries_per_subcrq;
3540 if (adapter->desired.rx_entries)
3541 adapter->req_rx_add_entries_per_subcrq =
3542 adapter->desired.rx_entries;
3543 else
3544 adapter->req_rx_add_entries_per_subcrq =
3545 adapter->max_rx_add_entries_per_subcrq;
3547 if (adapter->desired.tx_queues)
3548 adapter->req_tx_queues =
3549 adapter->desired.tx_queues;
3550 else
3551 adapter->req_tx_queues =
3552 adapter->opt_tx_comp_sub_queues;
3554 if (adapter->desired.rx_queues)
3555 adapter->req_rx_queues =
3556 adapter->desired.rx_queues;
3557 else
3558 adapter->req_rx_queues =
3559 adapter->opt_rx_comp_queues;
3561 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3562 }
3564 memset(&crq, 0, sizeof(crq));
3565 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3566 crq.request_capability.cmd = REQUEST_CAPABILITY;
3568 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3569 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3570 atomic_inc(&adapter->running_cap_crqs);
3571 ibmvnic_send_crq(adapter, &crq);
3573 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3574 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3575 atomic_inc(&adapter->running_cap_crqs);
3576 ibmvnic_send_crq(adapter, &crq);
3578 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3579 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3580 atomic_inc(&adapter->running_cap_crqs);
3581 ibmvnic_send_crq(adapter, &crq);
3583 crq.request_capability.capability =
3584 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3585 crq.request_capability.number =
3586 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3587 atomic_inc(&adapter->running_cap_crqs);
3588 ibmvnic_send_crq(adapter, &crq);
3590 crq.request_capability.capability =
3591 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3592 crq.request_capability.number =
3593 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3594 atomic_inc(&adapter->running_cap_crqs);
3595 ibmvnic_send_crq(adapter, &crq);
3597 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3598 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3599 atomic_inc(&adapter->running_cap_crqs);
3600 ibmvnic_send_crq(adapter, &crq);
3602 if (adapter->netdev->flags & IFF_PROMISC) {
3603 if (adapter->promisc_supported) {
3604 crq.request_capability.capability =
3605 cpu_to_be16(PROMISC_REQUESTED);
3606 crq.request_capability.number = cpu_to_be64(1);
3607 atomic_inc(&adapter->running_cap_crqs);
3608 ibmvnic_send_crq(adapter, &crq);
3609 }
3610 } else {
3611 crq.request_capability.capability =
3612 cpu_to_be16(PROMISC_REQUESTED);
3613 crq.request_capability.number = cpu_to_be64(0);
3614 atomic_inc(&adapter->running_cap_crqs);
3615 ibmvnic_send_crq(adapter, &crq);
3616 }
3617 }
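/* Every REQUEST_CAPABILITY sent above bumps running_cap_crqs; the
 * response handler decrements it and moves initialization forward
 * once all outstanding capability responses have arrived.
 */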
3619 static int pending_scrq(struct ibmvnic_adapter *adapter,
3620 struct ibmvnic_sub_crq_queue *scrq)
3622 union sub_crq *entry = &scrq->msgs[scrq->cur];
3624 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3625 return 1;
3626 else
3627 return 0;
3630 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3631 struct ibmvnic_sub_crq_queue *scrq)
3633 union sub_crq *entry;
3634 unsigned long flags;
3636 spin_lock_irqsave(&scrq->lock, flags);
3637 entry = &scrq->msgs[scrq->cur];
3638 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3639 if (++scrq->cur == scrq->size)
3640 scrq->cur = 0;
3641 } else {
3642 entry = NULL;
3643 }
3644 spin_unlock_irqrestore(&scrq->lock, flags);
3646 /* Ensure that the entire buffer descriptor has been
3647 * loaded before reading its contents
3648 */
3649 dma_rmb();
3651 return entry;
3654 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3656 struct ibmvnic_crq_queue *queue = &adapter->crq;
3657 union ibmvnic_crq *crq;
3659 crq = &queue->msgs[queue->cur];
3660 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3661 if (++queue->cur == queue->size)
3662 queue->cur = 0;
3663 } else {
3664 crq = NULL;
3665 }
3667 return crq;
3670 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3672 switch (rc) {
3673 case H_PARAMETER:
3674 dev_warn_ratelimited(dev,
3675 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3676 func, rc);
3677 break;
3678 case H_CLOSED:
3679 dev_warn_ratelimited(dev,
3680 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3681 func, rc);
3682 break;
3683 default:
3684 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3685 break;
3686 }
3689 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3690 u64 remote_handle, u64 ioba, u64 num_entries)
3692 unsigned int ua = adapter->vdev->unit_address;
3693 struct device *dev = &adapter->vdev->dev;
3694 u64 rc;
3696 /* Make sure the hypervisor sees the complete request */
3697 mb();
3698 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3699 cpu_to_be64(remote_handle),
3700 ioba, num_entries);
3702 if (rc)
3703 print_subcrq_error(dev, rc, __func__);
3705 return rc;
3708 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3709 union ibmvnic_crq *crq)
3711 unsigned int ua = adapter->vdev->unit_address;
3712 struct device *dev = &adapter->vdev->dev;
3713 u64 *u64_crq = (u64 *)crq;
3714 int rc;
3716 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3717 (unsigned long int)cpu_to_be64(u64_crq[0]),
3718 (unsigned long int)cpu_to_be64(u64_crq[1]));
3720 if (!adapter->crq.active &&
3721 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3722 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3723 return -EINVAL;
3724 }
3726 /* Make sure the hypervisor sees the complete request */
3727 mb();
3729 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3730 cpu_to_be64(u64_crq[0]),
3731 cpu_to_be64(u64_crq[1]));
3733 if (rc) {
3734 if (rc == H_CLOSED) {
3735 dev_warn(dev, "CRQ Queue closed\n");
3736 /* do not reset, report the fail, wait for passive init from server */
3737 }
3739 dev_warn(dev, "Send error (rc=%d)\n", rc);
3740 }
3742 return rc;
3745 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3747 struct device *dev = &adapter->vdev->dev;
3748 union ibmvnic_crq crq;
3749 int retries = 100;
3750 int rc;
3752 memset(&crq, 0, sizeof(crq));
3753 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3754 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3755 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3757 do {
3758 rc = ibmvnic_send_crq(adapter, &crq);
3759 if (rc != H_CLOSED)
3760 break;
3761 retries--;
3762 msleep(50);
3764 } while (retries > 0);
3766 if (rc) {
3767 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3768 return rc;
3769 }
3771 return 0;
3774 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3776 union ibmvnic_crq crq;
3778 memset(&crq, 0, sizeof(crq));
3779 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3780 crq.version_exchange.cmd = VERSION_EXCHANGE;
3781 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3783 return ibmvnic_send_crq(adapter, &crq);
3786 struct vnic_login_client_data {
3787 u8 type;
3788 __be16 len;
3789 char name[];
3790 } __packed;
3792 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3796 /* Calculate the amount of buffer space needed for the
3797 * vnic client data in the login buffer. There are four entries,
3798 * OS name, LPAR name, device name, and a null last entry.
3799 */
3800 len = 4 * sizeof(struct vnic_login_client_data);
3801 len += 6; /* "Linux" plus NULL */
3802 len += strlen(utsname()->nodename) + 1;
3803 len += strlen(adapter->netdev->name) + 1;
3805 return len;
3808 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3809 struct vnic_login_client_data *vlcd)
3811 const char *os_name = "Linux";
3812 int len;
3814 /* Type 1 - LPAR OS */
3815 vlcd->type = 1;
3816 len = strlen(os_name) + 1;
3817 vlcd->len = cpu_to_be16(len);
3818 strncpy(vlcd->name, os_name, len);
3819 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3821 /* Type 2 - LPAR name */
3822 vlcd->type = 2;
3823 len = strlen(utsname()->nodename) + 1;
3824 vlcd->len = cpu_to_be16(len);
3825 strncpy(vlcd->name, utsname()->nodename, len);
3826 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3828 /* Type 3 - device name */
3829 vlcd->type = 3;
3830 len = strlen(adapter->netdev->name) + 1;
3831 vlcd->len = cpu_to_be16(len);
3832 strncpy(vlcd->name, adapter->netdev->name, len);
3835 static int send_login(struct ibmvnic_adapter *adapter)
3837 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3838 struct ibmvnic_login_buffer *login_buffer;
3839 struct device *dev = &adapter->vdev->dev;
3840 struct vnic_login_client_data *vlcd;
3841 dma_addr_t rsp_buffer_token;
3842 dma_addr_t buffer_token;
3843 size_t rsp_buffer_size;
3844 union ibmvnic_crq crq;
3845 int client_data_len;
3846 size_t buffer_size;
3847 __be64 *tx_list_p;
3848 __be64 *rx_list_p;
3849 int rc;
3850 int i;
3852 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3853 netdev_err(adapter->netdev,
3854 "RX or TX queues are not allocated, device login failed\n");
3858 release_login_rsp_buffer(adapter);
3859 client_data_len = vnic_client_data_len(adapter);
3861 buffer_size =
3862 sizeof(struct ibmvnic_login_buffer) +
3863 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3864 client_data_len;
3866 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3867 if (!login_buffer)
3868 goto buf_alloc_failed;
3870 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3871 DMA_TO_DEVICE);
3872 if (dma_mapping_error(dev, buffer_token)) {
3873 dev_err(dev, "Couldn't map login buffer\n");
3874 goto buf_map_failed;
3877 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3878 sizeof(u64) * adapter->req_tx_queues +
3879 sizeof(u64) * adapter->req_rx_queues +
3880 sizeof(u64) * adapter->req_rx_queues +
3881 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3883 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3884 if (!login_rsp_buffer)
3885 goto buf_rsp_alloc_failed;
3887 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3888 rsp_buffer_size, DMA_FROM_DEVICE);
3889 if (dma_mapping_error(dev, rsp_buffer_token)) {
3890 dev_err(dev, "Couldn't map login rsp buffer\n");
3891 goto buf_rsp_map_failed;
3894 adapter->login_buf = login_buffer;
3895 adapter->login_buf_token = buffer_token;
3896 adapter->login_buf_sz = buffer_size;
3897 adapter->login_rsp_buf = login_rsp_buffer;
3898 adapter->login_rsp_buf_token = rsp_buffer_token;
3899 adapter->login_rsp_buf_sz = rsp_buffer_size;
3901 login_buffer->len = cpu_to_be32(buffer_size);
3902 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3903 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3904 login_buffer->off_txcomp_subcrqs =
3905 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3906 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3907 login_buffer->off_rxcomp_subcrqs =
3908 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3909 sizeof(u64) * adapter->req_tx_queues);
3910 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3911 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3913 tx_list_p = (__be64 *)((char *)login_buffer +
3914 sizeof(struct ibmvnic_login_buffer));
3915 rx_list_p = (__be64 *)((char *)login_buffer +
3916 sizeof(struct ibmvnic_login_buffer) +
3917 sizeof(u64) * adapter->req_tx_queues);
3919 for (i = 0; i < adapter->req_tx_queues; i++) {
3920 if (adapter->tx_scrq[i]) {
3921 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3922 crq_num);
3923 }
3924 }
3926 for (i = 0; i < adapter->req_rx_queues; i++) {
3927 if (adapter->rx_scrq[i]) {
3928 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3929 crq_num);
3930 }
3931 }
3933 /* Insert vNIC login client data */
3934 vlcd = (struct vnic_login_client_data *)
3935 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3936 login_buffer->client_data_offset =
3937 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3938 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3940 vnic_add_client_data(adapter, vlcd);
3942 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3943 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3944 netdev_dbg(adapter->netdev, "%016lx\n",
3945 ((unsigned long int *)(adapter->login_buf))[i]);
3948 memset(&crq, 0, sizeof(crq));
3949 crq.login.first = IBMVNIC_CRQ_CMD;
3950 crq.login.cmd = LOGIN;
3951 crq.login.ioba = cpu_to_be32(buffer_token);
3952 crq.login.len = cpu_to_be32(buffer_size);
3954 adapter->login_pending = true;
3955 rc = ibmvnic_send_crq(adapter, &crq);
3957 adapter->login_pending = false;
3958 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
3959 goto buf_rsp_map_failed;
3960 }
3962 return 0;
3964 buf_rsp_map_failed:
3965 kfree(login_rsp_buffer);
3966 adapter->login_rsp_buf = NULL;
3967 buf_rsp_alloc_failed:
3968 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3969 buf_map_failed:
3970 kfree(login_buffer);
3971 adapter->login_buf = NULL;
3972 buf_alloc_failed:
3973 return -1;
3976 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3979 union ibmvnic_crq crq;
3981 memset(&crq, 0, sizeof(crq));
3982 crq.request_map.first = IBMVNIC_CRQ_CMD;
3983 crq.request_map.cmd = REQUEST_MAP;
3984 crq.request_map.map_id = map_id;
3985 crq.request_map.ioba = cpu_to_be32(addr);
3986 crq.request_map.len = cpu_to_be32(len);
3987 return ibmvnic_send_crq(adapter, &crq);
3990 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3992 union ibmvnic_crq crq;
3994 memset(&crq, 0, sizeof(crq));
3995 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3996 crq.request_unmap.cmd = REQUEST_UNMAP;
3997 crq.request_unmap.map_id = map_id;
3998 return ibmvnic_send_crq(adapter, &crq);
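/* REQUEST_MAP registers a long-lived DMA region with the server under
 * map_id, and REQUEST_UNMAP retires that id; send_query_map() below
 * asks the server for its current view of mapped regions.
 */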
4001 static void send_query_map(struct ibmvnic_adapter *adapter)
4003 union ibmvnic_crq crq;
4005 memset(&crq, 0, sizeof(crq));
4006 crq.query_map.first = IBMVNIC_CRQ_CMD;
4007 crq.query_map.cmd = QUERY_MAP;
4008 ibmvnic_send_crq(adapter, &crq);
4011 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4012 static void send_query_cap(struct ibmvnic_adapter *adapter)
4014 union ibmvnic_crq crq;
4016 atomic_set(&adapter->running_cap_crqs, 0);
4017 memset(&crq, 0, sizeof(crq));
4018 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4019 crq.query_capability.cmd = QUERY_CAPABILITY;
4021 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4022 atomic_inc(&adapter->running_cap_crqs);
4023 ibmvnic_send_crq(adapter, &crq);
4025 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4026 atomic_inc(&adapter->running_cap_crqs);
4027 ibmvnic_send_crq(adapter, &crq);
4029 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4030 atomic_inc(&adapter->running_cap_crqs);
4031 ibmvnic_send_crq(adapter, &crq);
4033 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4034 atomic_inc(&adapter->running_cap_crqs);
4035 ibmvnic_send_crq(adapter, &crq);
4037 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4038 atomic_inc(&adapter->running_cap_crqs);
4039 ibmvnic_send_crq(adapter, &crq);
4041 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4042 atomic_inc(&adapter->running_cap_crqs);
4043 ibmvnic_send_crq(adapter, &crq);
4045 crq.query_capability.capability =
4046 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4047 atomic_inc(&adapter->running_cap_crqs);
4048 ibmvnic_send_crq(adapter, &crq);
4050 crq.query_capability.capability =
4051 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4052 atomic_inc(&adapter->running_cap_crqs);
4053 ibmvnic_send_crq(adapter, &crq);
4055 crq.query_capability.capability =
4056 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4057 atomic_inc(&adapter->running_cap_crqs);
4058 ibmvnic_send_crq(adapter, &crq);
4060 crq.query_capability.capability =
4061 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4062 atomic_inc(&adapter->running_cap_crqs);
4063 ibmvnic_send_crq(adapter, &crq);
4065 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4066 atomic_inc(&adapter->running_cap_crqs);
4067 ibmvnic_send_crq(adapter, &crq);
4069 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4070 atomic_inc(&adapter->running_cap_crqs);
4071 ibmvnic_send_crq(adapter, &crq);
4073 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4074 atomic_inc(&adapter->running_cap_crqs);
4075 ibmvnic_send_crq(adapter, &crq);
4077 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4078 atomic_inc(&adapter->running_cap_crqs);
4079 ibmvnic_send_crq(adapter, &crq);
4081 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4082 atomic_inc(&adapter->running_cap_crqs);
4083 ibmvnic_send_crq(adapter, &crq);
4085 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4086 atomic_inc(&adapter->running_cap_crqs);
4087 ibmvnic_send_crq(adapter, &crq);
4089 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4090 atomic_inc(&adapter->running_cap_crqs);
4091 ibmvnic_send_crq(adapter, &crq);
4093 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4094 atomic_inc(&adapter->running_cap_crqs);
4095 ibmvnic_send_crq(adapter, &crq);
4097 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4098 atomic_inc(&adapter->running_cap_crqs);
4099 ibmvnic_send_crq(adapter, &crq);
4101 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4102 atomic_inc(&adapter->running_cap_crqs);
4103 ibmvnic_send_crq(adapter, &crq);
4105 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4106 atomic_inc(&adapter->running_cap_crqs);
4107 ibmvnic_send_crq(adapter, &crq);
4109 crq.query_capability.capability =
4110 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4111 atomic_inc(&adapter->running_cap_crqs);
4112 ibmvnic_send_crq(adapter, &crq);
4114 crq.query_capability.capability =
4115 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4116 atomic_inc(&adapter->running_cap_crqs);
4117 ibmvnic_send_crq(adapter, &crq);
4119 crq.query_capability.capability =
4120 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4121 atomic_inc(&adapter->running_cap_crqs);
4122 ibmvnic_send_crq(adapter, &crq);
4124 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4125 atomic_inc(&adapter->running_cap_crqs);
4126 ibmvnic_send_crq(adapter, &crq);
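/* As with the request path, each QUERY_CAPABILITY above increments
 * running_cap_crqs, and the response handlers (not shown here) count
 * it back down before the driver proceeds with capability requests.
 */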
4129 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4131 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4132 struct device *dev = &adapter->vdev->dev;
4133 union ibmvnic_crq crq;
4135 adapter->ip_offload_tok =
4136 dma_map_single(dev,
4137 &adapter->ip_offload_buf,
4138 buf_sz,
4139 DMA_FROM_DEVICE);
4141 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4142 if (!firmware_has_feature(FW_FEATURE_CMO))
4143 dev_err(dev, "Couldn't map offload buffer\n");
4144 return;
4145 }
4147 memset(&crq, 0, sizeof(crq));
4148 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4149 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4150 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4151 crq.query_ip_offload.ioba =
4152 cpu_to_be32(adapter->ip_offload_tok);
4154 ibmvnic_send_crq(adapter, &crq);
4157 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4159 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4160 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4161 struct device *dev = &adapter->vdev->dev;
4162 netdev_features_t old_hw_features = 0;
4163 union ibmvnic_crq crq;
4165 adapter->ip_offload_ctrl_tok =
4166 dma_map_single(dev,
4167 ctrl_buf,
4168 sizeof(adapter->ip_offload_ctrl),
4169 DMA_TO_DEVICE);
4171 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4172 dev_err(dev, "Couldn't map ip offload control buffer\n");
4173 return;
4174 }
4176 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4177 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4178 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4179 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4180 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4181 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4182 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4183 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4184 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4185 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4187 /* large_rx disabled for now, additional features needed */
4188 ctrl_buf->large_rx_ipv4 = 0;
4189 ctrl_buf->large_rx_ipv6 = 0;
4191 if (adapter->state != VNIC_PROBING) {
4192 old_hw_features = adapter->netdev->hw_features;
4193 adapter->netdev->hw_features = 0;
4196 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4198 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4199 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4201 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4202 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4204 if ((adapter->netdev->features &
4205 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4206 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4208 if (buf->large_tx_ipv4)
4209 adapter->netdev->hw_features |= NETIF_F_TSO;
4210 if (buf->large_tx_ipv6)
4211 adapter->netdev->hw_features |= NETIF_F_TSO6;
4213 if (adapter->state == VNIC_PROBING) {
4214 adapter->netdev->features |= adapter->netdev->hw_features;
4215 } else if (old_hw_features != adapter->netdev->hw_features) {
4216 netdev_features_t tmp = 0;
4218 /* disable features no longer supported */
4219 adapter->netdev->features &= adapter->netdev->hw_features;
4220 /* turn on features now supported if previously enabled */
4221 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4222 adapter->netdev->hw_features;
4223 adapter->netdev->features |=
4224 tmp & adapter->netdev->wanted_features;
4227 memset(&crq, 0, sizeof(crq));
4228 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4229 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4230 crq.control_ip_offload.len =
4231 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4232 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4233 ibmvnic_send_crq(adapter, &crq);
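/* During probe the negotiated offloads seed netdev->features directly;
 * after a reset they are re-derived instead, so capabilities the new
 * backing device lost are switched off and previously wanted features
 * that reappeared are re-enabled.
 */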
4236 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4237 struct ibmvnic_adapter *adapter)
4239 struct device *dev = &adapter->vdev->dev;
4241 if (crq->get_vpd_size_rsp.rc.code) {
4242 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4243 crq->get_vpd_size_rsp.rc.code);
4244 complete(&adapter->fw_done);
4245 return;
4246 }
4248 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4249 complete(&adapter->fw_done);
4252 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4253 struct ibmvnic_adapter *adapter)
4255 struct device *dev = &adapter->vdev->dev;
4256 unsigned char *substr = NULL;
4257 u8 fw_level_len = 0;
4259 memset(adapter->fw_version, 0, 32);
4261 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4264 if (crq->get_vpd_rsp.rc.code) {
4265 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4266 crq->get_vpd_rsp.rc.code);
4267 goto complete;
4268 }
4270 /* get the position of the firmware version info
4271 * located after the ASCII 'RM' substring in the buffer
4272 */
4273 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4274 if (!substr) {
4275 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4276 goto complete;
4277 }
4279 /* get length of firmware level ASCII substring */
4280 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4281 fw_level_len = *(substr + 2);
4282 } else {
4283 dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
4284 goto complete;
4285 }
4287 /* copy firmware version string from vpd into adapter */
4288 if ((substr + 3 + fw_level_len) <
4289 (adapter->vpd->buff + adapter->vpd->len)) {
4290 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4291 } else {
4292 dev_info(dev, "FW substr extrapolated VPD buff\n");
4293 }
4295 complete:
4296 if (adapter->fw_version[0] == '\0')
4297 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4298 complete(&adapter->fw_done);
4301 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4303 struct device *dev = &adapter->vdev->dev;
4304 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4305 int i;
4307 dma_unmap_single(dev, adapter->ip_offload_tok,
4308 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4310 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4311 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4312 netdev_dbg(adapter->netdev, "%016lx\n",
4313 ((unsigned long int *)(buf))[i]);
4315 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4316 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4317 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4318 buf->tcp_ipv4_chksum);
4319 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4320 buf->tcp_ipv6_chksum);
4321 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4322 buf->udp_ipv4_chksum);
4323 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4324 buf->udp_ipv6_chksum);
4325 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4326 buf->large_tx_ipv4);
4327 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4328 buf->large_tx_ipv6);
4329 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4330 buf->large_rx_ipv4);
4331 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4332 buf->large_rx_ipv6);
4333 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4334 buf->max_ipv4_header_size);
4335 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4336 buf->max_ipv6_header_size);
4337 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4338 buf->max_tcp_header_size);
4339 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4340 buf->max_udp_header_size);
4341 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4342 buf->max_large_tx_size);
4343 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4344 buf->max_large_rx_size);
4345 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4346 buf->ipv6_extension_header);
4347 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4348 buf->tcp_pseudosum_req);
4349 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4350 buf->num_ipv6_ext_headers);
4351 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4352 buf->off_ipv6_ext_headers);
4354 send_control_ip_offload(adapter);
4357 static const char *ibmvnic_fw_err_cause(u16 cause)
4358 {
4359 switch (cause) {
4360 case ADAPTER_PROBLEM:
4361 return "adapter problem";
4362 case BUS_PROBLEM:
4363 return "bus problem";
4364 case FIRMWARE_PROBLEM:
4365 return "firmware problem";
4366 case DEVICE_DRIVER_PROBLEM:
4367 return "device driver problem";
4368 case EEH_RECOVERY:
4369 return "EEH recovery";
4370 case FW_UPDATED:
4371 return "firmware updated";
4372 case LOW_MEMORY:
4373 return "low Memory";
4374 default:
4375 return "unknown";
4376 }
4377 }
4379 static void handle_error_indication(union ibmvnic_crq *crq,
4380 struct ibmvnic_adapter *adapter)
4382 struct device *dev = &adapter->vdev->dev;
4383 u16 cause;
4385 cause = be16_to_cpu(crq->error_indication.error_cause);
4387 dev_warn_ratelimited(dev,
4388 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4389 crq->error_indication.flags
4390 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4391 ibmvnic_fw_err_cause(cause));
4393 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4394 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4396 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4399 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4400 struct ibmvnic_adapter *adapter)
4402 struct net_device *netdev = adapter->netdev;
4403 struct device *dev = &adapter->vdev->dev;
4406 rc = crq->change_mac_addr_rsp.rc.code;
4407 if (rc) {
4408 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4409 goto out;
4410 }
4411 /* crq->change_mac_addr.mac_addr is the requested one
4412 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4414 ether_addr_copy(netdev->dev_addr,
4415 &crq->change_mac_addr_rsp.mac_addr[0]);
4416 ether_addr_copy(adapter->mac_addr,
4417 &crq->change_mac_addr_rsp.mac_addr[0]);
4419 complete(&adapter->fw_done);
4423 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4424 struct ibmvnic_adapter *adapter)
4426 struct device *dev = &adapter->vdev->dev;
4430 atomic_dec(&adapter->running_cap_crqs);
4431 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4432 case REQ_TX_QUEUES:
4433 req_value = &adapter->req_tx_queues;
4434 name = "tx";
4435 break;
4436 case REQ_RX_QUEUES:
4437 req_value = &adapter->req_rx_queues;
4438 name = "rx";
4439 break;
4440 case REQ_RX_ADD_QUEUES:
4441 req_value = &adapter->req_rx_add_queues;
4442 name = "rx_add";
4443 break;
4444 case REQ_TX_ENTRIES_PER_SUBCRQ:
4445 req_value = &adapter->req_tx_entries_per_subcrq;
4446 name = "tx_entries_per_subcrq";
4448 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4449 req_value = &adapter->req_rx_add_entries_per_subcrq;
4450 name = "rx_add_entries_per_subcrq";
4453 req_value = &adapter->req_mtu;
4456 case PROMISC_REQUESTED:
4457 req_value = &adapter->promisc;
4461 dev_err(dev, "Got invalid cap request rsp %d\n",
4462 crq->request_capability.capability);
4466 switch (crq->request_capability_rsp.rc.code) {
4469 case PARTIALSUCCESS:
4470 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4472 (long int)be64_to_cpu(crq->request_capability_rsp.
4475 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4477 pr_err("mtu of %llu is not supported. Reverting.\n",
4479 *req_value = adapter->fallback.mtu;
4482 be64_to_cpu(crq->request_capability_rsp.number);
4485 send_request_cap(adapter, 1);
4488 dev_err(dev, "Error %d in request cap rsp\n",
4489 crq->request_capability_rsp.rc.code);
4493 /* Done receiving requested capabilities, query IP offload support */
4494 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4495 adapter->wait_capability = false;
4496 send_query_ip_offload(adapter);
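
/* Process the server's login response: sanity-check it against the
 * login request, record the rx buffer size and the tx/rx sub-CRQ
 * handles assigned by the server, then complete init_done.
 */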
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
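
/* Cache each capability value advertised by the server. When the last
 * outstanding query completes, move on to requesting our desired
 * capability values.
 */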
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}
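
/* Synchronously query the physical port parameters (speed/duplex),
 * serialized against other firmware commands by fw_lock.
 */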
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
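
/* Translate the firmware's link speed and duplex encoding into the
 * SPEED_ and DUPLEX_ constants reported through ethtool.
 */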
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
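
/* Main CRQ dispatcher, called from the CRQ tasklet with the queue lock
 * held: decode one CRQ message and route it to its handler. Transport
 * events (migration, failover, connection loss) trigger resets here.
 */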
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
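
/* CRQ interrupt handler: all real work is deferred to the tasklet. */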
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);

	return IRQ_HANDLED;
}
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
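
/* Allocate the CRQ page, register it with the hypervisor, and wire up
 * the CRQ interrupt and tasklet.
 */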
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset) {
		old_num_rx_queues = adapter->req_rx_queues;
		old_num_tx_queues = adapter->req_tx_queues;
		reinit_completion(&adapter->init_done);
	}

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
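
/* VIO bus probe: allocate the net_device, initialize locks, completions
 * and reset work, bring up the CRQ, run the capability/login handshake,
 * and register the netdev.
 */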
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);
	if (test_bit(0, &adapter->resetting)) {
		spin_unlock_irqrestore(&adapter->state_lock, flags);
		return -EBUSY;
	}

	adapter->state = VNIC_REMOVING;
	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
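
/* sysfs "failover" attribute: writing "1" fetches the session token via
 * H_VIOCTL and signals a session error to make the hypervisor fail the
 * connection over to the backing device.
 */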
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
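
/* Estimate the IO entitlement this device needs from its DMA window:
 * the CRQ page, the statistics buffer, the sub-CRQ queues, and every
 * buffer in the active rx pools.
 */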
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);