1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
4 /* IBM System i and System p Virtual NIC Device Driver */
5 /* Copyright (C) 2014 IBM Corp. */
6 /* Santiago Leon (santi_leon@yahoo.com) */
7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8 /* John Allen (jallen@linux.vnet.ibm.com) */
11 /* This module contains the implementation of a virtual ethernet device */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor. */
16 /* Messages are passed between the VNIC driver and the VNIC server using */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
20 /* are used by the driver to notify the server that a packet is */
21 /* ready for transmission or that a buffer has been added to receive a */
22 /* packet. Subsequently, sCRQs are used by the server to notify the */
23 /* driver that a packet transmission has been completed or that a packet */
24 /* has been received and placed in a waiting buffer. */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit */
28 /* or receive has been completed, the VNIC driver is required to use */
29 /* "long term mapping". This entails that large, continuous DMA mapped */
30 /* buffers are allocated on driver initialization and these buffers are */
31 /* then continuously reused to pass skbs to and from the VNIC server. */
33 /**************************************************************************/
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/kthread.h>
57 #include <linux/seq_file.h>
58 #include <linux/interrupt.h>
59 #include <net/net_namespace.h>
60 #include <asm/hvcall.h>
61 #include <linux/atomic.h>
64 #include <asm/iommu.h>
65 #include <linux/uaccess.h>
66 #include <asm/firmware.h>
67 #include <linux/workqueue.h>
68 #include <linux/if_vlan.h>
69 #include <linux/utsname.h>
73 static const char ibmvnic_driver_name[] = "ibmvnic";
74 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
76 MODULE_AUTHOR("Santiago Leon");
77 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
81 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
82 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
87 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
88 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
89 static int enable_scrq_irq(struct ibmvnic_adapter *,
90 struct ibmvnic_sub_crq_queue *);
91 static int disable_scrq_irq(struct ibmvnic_adapter *,
92 struct ibmvnic_sub_crq_queue *);
93 static int pending_scrq(struct ibmvnic_adapter *,
94 struct ibmvnic_sub_crq_queue *);
95 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
96 struct ibmvnic_sub_crq_queue *);
97 static int ibmvnic_poll(struct napi_struct *napi, int data);
98 static void send_query_map(struct ibmvnic_adapter *adapter);
99 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
100 static int send_request_unmap(struct ibmvnic_adapter *, u8);
101 static int send_login(struct ibmvnic_adapter *adapter);
102 static void send_query_cap(struct ibmvnic_adapter *adapter);
103 static int init_sub_crqs(struct ibmvnic_adapter *);
104 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
105 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
106 static void release_crq_queue(struct ibmvnic_adapter *);
107 static int __ibmvnic_set_mac(struct net_device *, u8 *);
108 static int init_crq_queue(struct ibmvnic_adapter *adapter);
109 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
110 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
111 struct ibmvnic_sub_crq_queue *tx_scrq);
112 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
113 struct ibmvnic_long_term_buff *ltb);
114 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
116 struct ibmvnic_stat {
117 char name[ETH_GSTRING_LEN];
121 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
122 offsetof(struct ibmvnic_statistics, stat))
123 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
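/* Device statistics exposed through ethtool. Each entry pairs a stat
 * name with the offset of the corresponding counter in adapter->stats
 * (see IBMVNIC_STAT_OFF and IBMVNIC_GET_STAT above).
 */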
125 static const struct ibmvnic_stat ibmvnic_stats[] = {
126 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
127 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
128 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
129 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
130 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
131 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
132 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
133 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
134 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
135 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
136 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
137 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
138 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
139 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
140 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
141 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
142 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
143 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
144 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
145 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
146 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
147 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
150 static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
152 union ibmvnic_crq crq;
154 memset(&crq, 0, sizeof(crq));
155 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
156 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
158 return ibmvnic_send_crq(adapter, &crq);
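/* Send a VERSION_EXCHANGE CRQ advertising the protocol version this
 * driver supports (ibmvnic_version).
 */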
161 static int send_version_xchg(struct ibmvnic_adapter *adapter)
163 union ibmvnic_crq crq;
165 memset(&crq, 0, sizeof(crq));
166 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
167 crq.version_exchange.cmd = VERSION_EXCHANGE;
168 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
170 return ibmvnic_send_crq(adapter, &crq);
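/* Register a sub-CRQ with the hypervisor using the H_REG_SUB_CRQ
 * hcall; on success the hypervisor's return buffer (retbuf) supplies
 * the identifiers for the new sub-CRQ (e.g. *number).
 */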
173 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
174 unsigned long length, unsigned long *number,
177 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
180 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
188 * ibmvnic_wait_for_completion - Check device state and wait for completion
189 * @adapter: private device data
190 * @comp_done: completion structure to wait for
191 * @timeout: time to wait in milliseconds
193 * Wait for a completion signal or until the timeout limit is reached
194 * while checking that the device is still active.
196 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
197 struct completion *comp_done,
198 unsigned long timeout)
200 struct net_device *netdev;
201 unsigned long div_timeout;
204 netdev = adapter->netdev;
206 div_timeout = msecs_to_jiffies(timeout / retry);
208 if (!adapter->crq.active) {
209 netdev_err(netdev, "Device down!\n");
214 if (wait_for_completion_timeout(comp_done, div_timeout))
217 netdev_err(netdev, "Operation timed out.\n");
222 * reuse_ltb() - Check if a long term buffer can be reused
223 * @ltb: The long term buffer to be checked
224 * @size: The size of the long term buffer.
226 * An LTB can be reused unless its size has changed.
228 * Return: true if the LTB can be reused, false otherwise.
230 static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
232 return (ltb->buff && ltb->size == size);
236 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
238 * @adapter: ibmvnic adapter associated to the LTB
239 * @ltb: container object for the LTB
240 * @size: size of the LTB
242 * Allocate an LTB of the specified size and notify VIOS.
244 * If the given @ltb already has the correct size, reuse it. Otherwise, if
245 * it's non-NULL, free it. Then allocate a new one of the correct size.
246 * Notify the VIOS either way since we may now be working with a new VIOS.
248 * Allocating larger chunks of memory during resets, especially during LPM or
249 * under low memory situations, can cause resets to fail/timeout and the LPAR to
250 * lose connectivity. So hold onto the LTB even if we fail to communicate
251 * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
253 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
254 * a negative value otherwise.
256 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
257 struct ibmvnic_long_term_buff *ltb, int size)
259 struct device *dev = &adapter->vdev->dev;
262 if (!reuse_ltb(ltb, size)) {
264 "LTB size changed from 0x%llx to 0x%x, reallocating\n",
266 free_long_term_buff(adapter, ltb);
270 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
271 ltb->map_id, ltb->size);
273 ltb->buff = dma_alloc_coherent(dev, size, <b->addr,
276 dev_err(dev, "Couldn't alloc long term buffer\n");
281 ltb->map_id = find_first_zero_bit(adapter->map_ids,
283 bitmap_set(adapter->map_ids, ltb->map_id, 1);
286 "Allocated new LTB [map %d, size 0x%llx]\n",
287 ltb->map_id, ltb->size);
290 /* Ensure ltb is zeroed - especially when reusing it. */
291 memset(ltb->buff, 0, ltb->size);
293 mutex_lock(&adapter->fw_lock);
294 adapter->fw_done_rc = 0;
295 reinit_completion(&adapter->fw_done);
297 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
299 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
303 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
305 dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
310 if (adapter->fw_done_rc) {
311 dev_err(dev, "Couldn't map LTB, rc = %d\n",
312 adapter->fw_done_rc);
318 /* don't free LTB on communication error - see function header */
319 mutex_unlock(&adapter->fw_lock);
323 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
324 struct ibmvnic_long_term_buff *ltb)
326 struct device *dev = &adapter->vdev->dev;
331 /* The VIOS automatically unmaps the long term buffer at the remote
332 * end for the following resets:
333 * FAILOVER, MOBILITY, TIMEOUT.
335 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
336 adapter->reset_reason != VNIC_RESET_MOBILITY &&
337 adapter->reset_reason != VNIC_RESET_TIMEOUT)
338 send_request_unmap(adapter, ltb->map_id);
340 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
343 /* mark this map_id free */
344 bitmap_clear(adapter->map_ids, ltb->map_id, 1);
348 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
352 for (i = 0; i < adapter->num_active_rx_pools; i++)
353 adapter->rx_pool[i].active = 0;
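/* Refill an rx pool: allocate (or reuse) an skb for each free slot,
 * point the slot at its region of the pool's long term buffer, and
 * queue an rx_add descriptor for it. Descriptors are batched in the
 * sub-CRQ's indirect buffer and flushed to the VIOS with
 * send_subcrq_indirect(); if the hcall fails, the queued buffers are
 * unwound and the pool may be deactivated.
 */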
356 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
357 struct ibmvnic_rx_pool *pool)
359 int count = pool->size - atomic_read(&pool->available);
360 u64 handle = adapter->rx_scrq[pool->index]->handle;
361 struct device *dev = &adapter->vdev->dev;
362 struct ibmvnic_ind_xmit_queue *ind_bufp;
363 struct ibmvnic_sub_crq_queue *rx_scrq;
364 union sub_crq *sub_crq;
365 int buffers_added = 0;
366 unsigned long lpar_rc;
378 rx_scrq = adapter->rx_scrq[pool->index];
379 ind_bufp = &rx_scrq->ind_buf;
381 /* netdev_alloc_skb() could have failed after we saved a few skbs
382 * in the indir_buf and we would not have sent them to VIOS yet.
383 * To account for them, start the loop at ind_bufp->index rather
384 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
387 for (i = ind_bufp->index; i < count; ++i) {
388 index = pool->free_map[pool->next_free];
390 /* We may be reusing the skb from earlier resets. Allocate
391 * only if necessary. But since the LTB may have changed
392 * during reset (see init_rx_pools()), update LTB below
393 * even if reusing skb.
395 skb = pool->rx_buff[index].skb;
397 skb = netdev_alloc_skb(adapter->netdev,
400 dev_err(dev, "Couldn't replenish rx buff\n");
401 adapter->replenish_no_mem++;
406 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
407 pool->next_free = (pool->next_free + 1) % pool->size;
409 /* Copy the skb to the long term mapped DMA buffer */
410 offset = index * pool->buff_size;
411 dst = pool->long_term_buff.buff + offset;
412 memset(dst, 0, pool->buff_size);
413 dma_addr = pool->long_term_buff.addr + offset;
415 /* add the skb to an rx_buff in the pool */
416 pool->rx_buff[index].data = dst;
417 pool->rx_buff[index].dma = dma_addr;
418 pool->rx_buff[index].skb = skb;
419 pool->rx_buff[index].pool_index = pool->index;
420 pool->rx_buff[index].size = pool->buff_size;
422 /* queue the rx_buff for the next send_subcrq_indirect */
423 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
424 memset(sub_crq, 0, sizeof(*sub_crq));
425 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
426 sub_crq->rx_add.correlator =
427 cpu_to_be64((u64)&pool->rx_buff[index]);
428 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
429 sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
431 /* The length field of the sCRQ is defined to be 24 bits so the
432 * buffer size needs to be left shifted by a byte before it is
433 * converted to big endian to prevent the last byte from being
436 #ifdef __LITTLE_ENDIAN__
439 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
441 /* if send_subcrq_indirect queue is full, flush to VIOS */
442 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
445 send_subcrq_indirect(adapter, handle,
446 (u64)ind_bufp->indir_dma,
447 (u64)ind_bufp->index);
448 if (lpar_rc != H_SUCCESS)
450 buffers_added += ind_bufp->index;
451 adapter->replenish_add_buff_success += ind_bufp->index;
455 atomic_add(buffers_added, &pool->available);
459 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
460 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
461 for (i = ind_bufp->index - 1; i >= 0; --i) {
462 struct ibmvnic_rx_buff *rx_buff;
464 pool->next_free = pool->next_free == 0 ?
465 pool->size - 1 : pool->next_free - 1;
466 sub_crq = &ind_bufp->indir_arr[i];
467 rx_buff = (struct ibmvnic_rx_buff *)
468 be64_to_cpu(sub_crq->rx_add.correlator);
469 index = (int)(rx_buff - pool->rx_buff);
470 pool->free_map[pool->next_free] = index;
471 dev_kfree_skb_any(pool->rx_buff[index].skb);
472 pool->rx_buff[index].skb = NULL;
474 adapter->replenish_add_buff_failure += ind_bufp->index;
475 atomic_add(buffers_added, &pool->available);
477 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
478 /* Disable buffer pool replenishment and report carrier off if
479 * queue is closed or pending failover.
480 * Firmware guarantees that a signal will be sent to the
481 * driver, triggering a reset.
483 deactivate_rx_pools(adapter);
484 netif_carrier_off(adapter->netdev);
488 static void replenish_pools(struct ibmvnic_adapter *adapter)
492 adapter->replenish_task_cycles++;
493 for (i = 0; i < adapter->num_active_rx_pools; i++) {
494 if (adapter->rx_pool[i].active)
495 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
498 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
501 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
503 kfree(adapter->tx_stats_buffers);
504 kfree(adapter->rx_stats_buffers);
505 adapter->tx_stats_buffers = NULL;
506 adapter->rx_stats_buffers = NULL;
509 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
511 adapter->tx_stats_buffers =
512 kcalloc(IBMVNIC_MAX_QUEUES,
513 sizeof(struct ibmvnic_tx_queue_stats),
515 if (!adapter->tx_stats_buffers)
518 adapter->rx_stats_buffers =
519 kcalloc(IBMVNIC_MAX_QUEUES,
520 sizeof(struct ibmvnic_rx_queue_stats),
522 if (!adapter->rx_stats_buffers)
528 static void release_stats_token(struct ibmvnic_adapter *adapter)
530 struct device *dev = &adapter->vdev->dev;
532 if (!adapter->stats_token)
535 dma_unmap_single(dev, adapter->stats_token,
536 sizeof(struct ibmvnic_statistics),
538 adapter->stats_token = 0;
541 static int init_stats_token(struct ibmvnic_adapter *adapter)
543 struct device *dev = &adapter->vdev->dev;
547 stok = dma_map_single(dev, &adapter->stats,
548 sizeof(struct ibmvnic_statistics),
550 rc = dma_mapping_error(dev, stok);
552 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
556 adapter->stats_token = stok;
557 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
562 * release_rx_pools() - Release any rx pools attached to @adapter.
563 * @adapter: ibmvnic adapter
565 * Safe to call this multiple times - even if no pools are attached.
567 static void release_rx_pools(struct ibmvnic_adapter *adapter)
569 struct ibmvnic_rx_pool *rx_pool;
572 if (!adapter->rx_pool)
575 for (i = 0; i < adapter->num_active_rx_pools; i++) {
576 rx_pool = &adapter->rx_pool[i];
578 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
580 kfree(rx_pool->free_map);
582 free_long_term_buff(adapter, &rx_pool->long_term_buff);
584 if (!rx_pool->rx_buff)
587 for (j = 0; j < rx_pool->size; j++) {
588 if (rx_pool->rx_buff[j].skb) {
589 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
590 rx_pool->rx_buff[j].skb = NULL;
594 kfree(rx_pool->rx_buff);
597 kfree(adapter->rx_pool);
598 adapter->rx_pool = NULL;
599 adapter->num_active_rx_pools = 0;
600 adapter->prev_rx_pool_size = 0;
604 * reuse_rx_pools() - Check if the existing rx pools can be reused.
605 * @adapter: ibmvnic adapter
607 * Check if the existing rx pools in the adapter can be reused. The
608 * pools can be reused if the pool parameters (number of pools,
609 * number of buffers in the pool and size of each buffer) have not
612 * NOTE: This assumes that all pools have the same number of buffers
613 * which is the case currently. If that changes, we must fix this.
615 * Return: true if the rx pools can be reused, false otherwise.
617 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
619 u64 old_num_pools, new_num_pools;
620 u64 old_pool_size, new_pool_size;
621 u64 old_buff_size, new_buff_size;
623 if (!adapter->rx_pool)
626 old_num_pools = adapter->num_active_rx_pools;
627 new_num_pools = adapter->req_rx_queues;
629 old_pool_size = adapter->prev_rx_pool_size;
630 new_pool_size = adapter->req_rx_add_entries_per_subcrq;
632 old_buff_size = adapter->prev_rx_buf_sz;
633 new_buff_size = adapter->cur_rx_buf_sz;
635 if (old_buff_size != new_buff_size ||
636 old_num_pools != new_num_pools ||
637 old_pool_size != new_pool_size)
644 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
645 * @netdev: net device associated with the vnic interface
647 * Initialize the set of receiver pools in the ibmvnic adapter associated
648 * with the net_device @netdev. If possible, reuse the existing rx pools.
649 * Otherwise free any existing pools and allocate a new set of pools
650 * before initializing them.
652 * Return: 0 on success and negative value on error.
654 static int init_rx_pools(struct net_device *netdev)
656 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
657 struct device *dev = &adapter->vdev->dev;
658 struct ibmvnic_rx_pool *rx_pool;
660 u64 pool_size; /* # of buffers in one pool */
664 pool_size = adapter->req_rx_add_entries_per_subcrq;
665 num_pools = adapter->req_rx_queues;
666 buff_size = adapter->cur_rx_buf_sz;
668 if (reuse_rx_pools(adapter)) {
669 dev_dbg(dev, "Reusing rx pools\n");
673 /* Allocate/populate the pools. */
674 release_rx_pools(adapter);
676 adapter->rx_pool = kcalloc(num_pools,
677 sizeof(struct ibmvnic_rx_pool),
679 if (!adapter->rx_pool) {
680 dev_err(dev, "Failed to allocate rx pools\n");
684 /* Set num_active_rx_pools early. If we fail below after partial
685 * allocation, release_rx_pools() will know how many to look for.
687 adapter->num_active_rx_pools = num_pools;
689 for (i = 0; i < num_pools; i++) {
690 rx_pool = &adapter->rx_pool[i];
692 netdev_dbg(adapter->netdev,
693 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
694 i, pool_size, buff_size);
696 rx_pool->size = pool_size;
698 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
700 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
702 if (!rx_pool->free_map) {
703 dev_err(dev, "Couldn't alloc free_map %d\n", i);
708 rx_pool->rx_buff = kcalloc(rx_pool->size,
709 sizeof(struct ibmvnic_rx_buff),
711 if (!rx_pool->rx_buff) {
712 dev_err(dev, "Couldn't alloc rx buffers\n");
718 adapter->prev_rx_pool_size = pool_size;
719 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
722 for (i = 0; i < num_pools; i++) {
723 rx_pool = &adapter->rx_pool[i];
724 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
725 i, rx_pool->size, rx_pool->buff_size);
727 rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
728 rx_pool->size * rx_pool->buff_size);
732 for (j = 0; j < rx_pool->size; ++j) {
733 struct ibmvnic_rx_buff *rx_buff;
735 rx_pool->free_map[j] = j;
737 /* NOTE: Don't clear rx_buff->skb here - will leak
738 * memory! replenish_rx_pool() will reuse skbs or
739 * allocate as necessary.
741 rx_buff = &rx_pool->rx_buff[j];
745 rx_buff->pool_index = 0;
748 /* Mark pool "empty" so replenish_rx_pools() will
749 * update the LTB info for each buffer
751 atomic_set(&rx_pool->available, 0);
752 rx_pool->next_alloc = 0;
753 rx_pool->next_free = 0;
754 /* replenish_rx_pool() may have called deactivate_rx_pools()
755 * on failover. Ensure pool is active now.
761 release_rx_pools(adapter);
763 /* We failed to allocate one or more LTBs or map them on the VIOS.
764 * Hold onto the pools and any LTBs that we did allocate/map.
769 static void release_vpd_data(struct ibmvnic_adapter *adapter)
774 kfree(adapter->vpd->buff);
780 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
781 struct ibmvnic_tx_pool *tx_pool)
783 kfree(tx_pool->tx_buff);
784 kfree(tx_pool->free_map);
785 free_long_term_buff(adapter, &tx_pool->long_term_buff);
789 * release_tx_pools() - Release any tx pools attached to @adapter.
790 * @adapter: ibmvnic adapter
792 * Safe to call this multiple times - even if no pools are attached.
794 static void release_tx_pools(struct ibmvnic_adapter *adapter)
798 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
799 * both NULL or both non-NULL. So we only need to check one.
801 if (!adapter->tx_pool)
804 for (i = 0; i < adapter->num_active_tx_pools; i++) {
805 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
806 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
809 kfree(adapter->tx_pool);
810 adapter->tx_pool = NULL;
811 kfree(adapter->tso_pool);
812 adapter->tso_pool = NULL;
813 adapter->num_active_tx_pools = 0;
814 adapter->prev_tx_pool_size = 0;
817 static int init_one_tx_pool(struct net_device *netdev,
818 struct ibmvnic_tx_pool *tx_pool,
819 int pool_size, int buf_size)
823 tx_pool->tx_buff = kcalloc(pool_size,
824 sizeof(struct ibmvnic_tx_buff),
826 if (!tx_pool->tx_buff)
829 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
830 if (!tx_pool->free_map) {
831 kfree(tx_pool->tx_buff);
832 tx_pool->tx_buff = NULL;
836 for (i = 0; i < pool_size; i++)
837 tx_pool->free_map[i] = i;
839 tx_pool->consumer_index = 0;
840 tx_pool->producer_index = 0;
841 tx_pool->num_buffers = pool_size;
842 tx_pool->buf_size = buf_size;
848 * reuse_tx_pools() - Check if the existing tx pools can be reused.
849 * @adapter: ibmvnic adapter
851 * Check if the existing tx pools in the adapter can be reused. The
852 * pools can be reused if the pool parameters (number of pools,
853 * number of buffers in the pool and mtu) have not changed.
855 * NOTE: This assumes that all pools have the same number of buffers
856 * which is the case currently. If that changes, we must fix this.
858 * Return: true if the tx pools can be reused, false otherwise.
860 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
862 u64 old_num_pools, new_num_pools;
863 u64 old_pool_size, new_pool_size;
864 u64 old_mtu, new_mtu;
866 if (!adapter->tx_pool)
869 old_num_pools = adapter->num_active_tx_pools;
870 new_num_pools = adapter->num_active_tx_scrqs;
871 old_pool_size = adapter->prev_tx_pool_size;
872 new_pool_size = adapter->req_tx_entries_per_subcrq;
873 old_mtu = adapter->prev_mtu;
874 new_mtu = adapter->req_mtu;
876 if (old_mtu != new_mtu ||
877 old_num_pools != new_num_pools ||
878 old_pool_size != new_pool_size)
885 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
886 * @netdev: net device associated with the vnic interface
888 * Initialize the set of transmit pools in the ibmvnic adapter associated
889 * with the net_device @netdev. If possible, reuse the existing tx pools.
890 * Otherwise free any existing pools and allocate a new set of pools
891 * before initializing them.
893 * Return: 0 on success and negative value on error.
895 static int init_tx_pools(struct net_device *netdev)
897 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
898 struct device *dev = &adapter->vdev->dev;
900 u64 pool_size; /* # of buffers in pool */
904 num_pools = adapter->req_tx_queues;
906 /* We must notify the VIOS about the LTB on all resets - but we only
907 * need to alloc/populate pools if either the number of buffers or
908 * size of each buffer in the pool has changed.
910 if (reuse_tx_pools(adapter)) {
911 netdev_dbg(netdev, "Reusing tx pools\n");
915 /* Allocate/populate the pools. */
916 release_tx_pools(adapter);
918 pool_size = adapter->req_tx_entries_per_subcrq;
919 num_pools = adapter->num_active_tx_scrqs;
921 adapter->tx_pool = kcalloc(num_pools,
922 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
923 if (!adapter->tx_pool)
926 adapter->tso_pool = kcalloc(num_pools,
927 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
928 /* To simplify release_tx_pools() ensure that ->tx_pool and
929 * ->tso_pool are either both NULL or both non-NULL.
931 if (!adapter->tso_pool) {
932 kfree(adapter->tx_pool);
933 adapter->tx_pool = NULL;
937 /* Set num_active_tx_pools early. If we fail below after partial
938 * allocation, release_tx_pools() will know how many to look for.
940 adapter->num_active_tx_pools = num_pools;
942 buff_size = adapter->req_mtu + VLAN_HLEN;
943 buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
945 for (i = 0; i < num_pools; i++) {
946 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
947 i, adapter->req_tx_entries_per_subcrq, buff_size);
949 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
950 pool_size, buff_size);
954 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
961 adapter->prev_tx_pool_size = pool_size;
962 adapter->prev_mtu = adapter->req_mtu;
965 /* NOTE: All tx_pools have the same number of buffers (which is
966 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
967 * buffers (see the calls to init_one_tx_pool() for these).
968 * For consistency, we use tx_pool->num_buffers and
969 * tso_pool->num_buffers below.
972 for (i = 0; i < num_pools; i++) {
973 struct ibmvnic_tx_pool *tso_pool;
974 struct ibmvnic_tx_pool *tx_pool;
977 tx_pool = &adapter->tx_pool[i];
978 ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
979 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
983 dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
984 i, tx_pool->long_term_buff.buff,
985 tx_pool->num_buffers, tx_pool->buf_size);
987 tx_pool->consumer_index = 0;
988 tx_pool->producer_index = 0;
990 for (j = 0; j < tx_pool->num_buffers; j++)
991 tx_pool->free_map[j] = j;
993 tso_pool = &adapter->tso_pool[i];
994 ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
995 if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
999 dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
1000 i, tso_pool->long_term_buff.buff,
1001 tso_pool->num_buffers, tso_pool->buf_size);
1003 tso_pool->consumer_index = 0;
1004 tso_pool->producer_index = 0;
1006 for (j = 0; j < tso_pool->num_buffers; j++)
1007 tso_pool->free_map[j] = j;
1012 release_tx_pools(adapter);
1014 /* We failed to allocate one or more LTBs or map them on the VIOS.
1015 * Hold onto the pools and any LTBs that we did allocate/map.
1020 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1024 if (adapter->napi_enabled)
1027 for (i = 0; i < adapter->req_rx_queues; i++)
1028 napi_enable(&adapter->napi[i]);
1030 adapter->napi_enabled = true;
1033 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1037 if (!adapter->napi_enabled)
1040 for (i = 0; i < adapter->req_rx_queues; i++) {
1041 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1042 napi_disable(&adapter->napi[i]);
1045 adapter->napi_enabled = false;
1048 static int init_napi(struct ibmvnic_adapter *adapter)
1052 adapter->napi = kcalloc(adapter->req_rx_queues,
1053 sizeof(struct napi_struct), GFP_KERNEL);
1057 for (i = 0; i < adapter->req_rx_queues; i++) {
1058 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1059 netif_napi_add(adapter->netdev, &adapter->napi[i],
1060 ibmvnic_poll, NAPI_POLL_WEIGHT);
1063 adapter->num_active_rx_napi = adapter->req_rx_queues;
1067 static void release_napi(struct ibmvnic_adapter *adapter)
1074 for (i = 0; i < adapter->num_active_rx_napi; i++) {
1075 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1076 netif_napi_del(&adapter->napi[i]);
1079 kfree(adapter->napi);
1080 adapter->napi = NULL;
1081 adapter->num_active_rx_napi = 0;
1082 adapter->napi_enabled = false;
1085 static const char *adapter_state_to_string(enum vnic_state state)
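/* Log in to the VNIC server: send a LOGIN request and wait for the
 * response. Timeouts, ABORTED and PARTIALSUCCESS responses are
 * retried (the latter after re-querying capabilities and
 * re-initializing the sub-CRQs), up to the retry limit.
 */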
1110 static int ibmvnic_login(struct net_device *netdev)
1112 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1113 unsigned long timeout = msecs_to_jiffies(20000);
1114 int retry_count = 0;
1121 if (retry_count > retries) {
1122 netdev_warn(netdev, "Login attempts exceeded\n");
1126 adapter->init_done_rc = 0;
1127 reinit_completion(&adapter->init_done);
1128 rc = send_login(adapter);
1132 if (!wait_for_completion_timeout(&adapter->init_done,
1134 netdev_warn(netdev, "Login timed out, retrying...\n");
1136 adapter->init_done_rc = 0;
1141 if (adapter->init_done_rc == ABORTED) {
1142 netdev_warn(netdev, "Login aborted, retrying...\n");
1144 adapter->init_done_rc = 0;
1146 /* FW or device may be busy, so
1147 * wait a bit before retrying login
1150 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
1152 release_sub_crqs(adapter, 1);
1156 "Received partial success, retrying...\n");
1157 adapter->init_done_rc = 0;
1158 reinit_completion(&adapter->init_done);
1159 send_query_cap(adapter);
1160 if (!wait_for_completion_timeout(&adapter->init_done,
1163 "Capabilities query timed out\n");
1167 rc = init_sub_crqs(adapter);
1170 "SCRQ initialization failed\n");
1174 rc = init_sub_crq_irqs(adapter);
1177 "SCRQ irq initialization failed\n");
1180 } else if (adapter->init_done_rc) {
1181 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
1182 adapter->init_done_rc);
1187 __ibmvnic_set_mac(netdev, adapter->mac_addr);
1189 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1193 static void release_login_buffer(struct ibmvnic_adapter *adapter)
1195 kfree(adapter->login_buf);
1196 adapter->login_buf = NULL;
1199 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1201 kfree(adapter->login_rsp_buf);
1202 adapter->login_rsp_buf = NULL;
1205 static void release_resources(struct ibmvnic_adapter *adapter)
1207 release_vpd_data(adapter);
1209 release_napi(adapter);
1210 release_login_buffer(adapter);
1211 release_login_rsp_buffer(adapter);
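/* Ask the VNIC server to change the logical link state (up or down)
 * and wait for its response. A PARTIALSUCCESS response is retried
 * after a short delay; any other non-zero response code is returned
 * to the caller.
 */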
1214 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1216 struct net_device *netdev = adapter->netdev;
1217 unsigned long timeout = msecs_to_jiffies(20000);
1218 union ibmvnic_crq crq;
1222 netdev_dbg(netdev, "setting link state %d\n", link_state);
1224 memset(&crq, 0, sizeof(crq));
1225 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1226 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1227 crq.logical_link_state.link_state = link_state;
1232 reinit_completion(&adapter->init_done);
1233 rc = ibmvnic_send_crq(adapter, &crq);
1235 netdev_err(netdev, "Failed to set link state\n");
1239 if (!wait_for_completion_timeout(&adapter->init_done,
1241 netdev_err(netdev, "timeout setting link state\n");
1245 if (adapter->init_done_rc == PARTIALSUCCESS) {
1246 /* Partial success, delay and re-send */
1249 } else if (adapter->init_done_rc) {
1250 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1251 adapter->init_done_rc);
1252 return adapter->init_done_rc;
1259 static int set_real_num_queues(struct net_device *netdev)
1261 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1264 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1265 adapter->req_tx_queues, adapter->req_rx_queues);
1267 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1269 netdev_err(netdev, "failed to set the number of tx queues\n");
1273 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1275 netdev_err(netdev, "failed to set the number of rx queues\n");
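/* Retrieve Vital Product Data from the VNIC server: query the VPD
 * size with GET_VPD_SIZE, (re)allocate and DMA map a buffer of that
 * size, then fetch the contents with a GET_VPD request.
 */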
1280 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1282 struct device *dev = &adapter->vdev->dev;
1283 union ibmvnic_crq crq;
1287 if (adapter->vpd->buff)
1288 len = adapter->vpd->len;
1290 mutex_lock(&adapter->fw_lock);
1291 adapter->fw_done_rc = 0;
1292 reinit_completion(&adapter->fw_done);
1294 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1295 crq.get_vpd_size.cmd = GET_VPD_SIZE;
1296 rc = ibmvnic_send_crq(adapter, &crq);
1298 mutex_unlock(&adapter->fw_lock);
1302 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1304 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1305 mutex_unlock(&adapter->fw_lock);
1308 mutex_unlock(&adapter->fw_lock);
1310 if (!adapter->vpd->len)
1313 if (!adapter->vpd->buff)
1314 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1315 else if (adapter->vpd->len != len)
1316 adapter->vpd->buff =
1317 krealloc(adapter->vpd->buff,
1318 adapter->vpd->len, GFP_KERNEL);
1320 if (!adapter->vpd->buff) {
1321 dev_err(dev, "Could allocate VPD buffer\n");
1325 adapter->vpd->dma_addr =
1326 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1328 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1329 dev_err(dev, "Could not map VPD buffer\n");
1330 kfree(adapter->vpd->buff);
1331 adapter->vpd->buff = NULL;
1335 mutex_lock(&adapter->fw_lock);
1336 adapter->fw_done_rc = 0;
1337 reinit_completion(&adapter->fw_done);
1339 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1340 crq.get_vpd.cmd = GET_VPD;
1341 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1342 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1343 rc = ibmvnic_send_crq(adapter, &crq);
1345 kfree(adapter->vpd->buff);
1346 adapter->vpd->buff = NULL;
1347 mutex_unlock(&adapter->fw_lock);
1351 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1353 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1354 kfree(adapter->vpd->buff);
1355 adapter->vpd->buff = NULL;
1356 mutex_unlock(&adapter->fw_lock);
1360 mutex_unlock(&adapter->fw_lock);
1364 static int init_resources(struct ibmvnic_adapter *adapter)
1366 struct net_device *netdev = adapter->netdev;
1369 rc = set_real_num_queues(netdev);
1373 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1377 /* Vital Product Data (VPD) */
1378 rc = ibmvnic_get_vpd(adapter);
1380 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1384 rc = init_napi(adapter);
1388 send_query_map(adapter);
1390 rc = init_rx_pools(netdev);
1394 rc = init_tx_pools(netdev);
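/* Bring a logged-in adapter up: replenish the rx pools, enable NAPI
 * and the sub-CRQ interrupts, set the logical link state to up and
 * start the TX queues.
 */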
1398 static int __ibmvnic_open(struct net_device *netdev)
1400 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1401 enum vnic_state prev_state = adapter->state;
1404 adapter->state = VNIC_OPENING;
1405 replenish_pools(adapter);
1406 ibmvnic_napi_enable(adapter);
1408 /* We're ready to receive frames, enable the sub-crq interrupts and
1409 * set the logical link state to up
1411 for (i = 0; i < adapter->req_rx_queues; i++) {
1412 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1413 if (prev_state == VNIC_CLOSED)
1414 enable_irq(adapter->rx_scrq[i]->irq);
1415 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1418 for (i = 0; i < adapter->req_tx_queues; i++) {
1419 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1420 if (prev_state == VNIC_CLOSED)
1421 enable_irq(adapter->tx_scrq[i]->irq);
1422 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1423 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1426 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1428 ibmvnic_napi_disable(adapter);
1429 ibmvnic_disable_irqs(adapter);
1433 adapter->tx_queues_active = true;
1435 /* Since queues were stopped until now, there shouldn't be any
1436 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
1437 * don't need the synchronize_rcu()? Leaving it for consistency
1438 * with setting ->tx_queues_active = false.
1442 netif_tx_start_all_queues(netdev);
1444 if (prev_state == VNIC_CLOSED) {
1445 for (i = 0; i < adapter->req_rx_queues; i++)
1446 napi_schedule(&adapter->napi[i]);
1449 adapter->state = VNIC_OPEN;
1453 static int ibmvnic_open(struct net_device *netdev)
1455 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1460 /* If device failover is pending or we are about to reset, just set
1461 * device state and return. Device operation will be handled by reset
1464 * It should be safe to overwrite the adapter->state here. Since
1465 * we hold the rtnl, either the reset has not actually started or
1466 * the rtnl got dropped during the set_link_state() in do_reset().
1467 * In the former case, no one else is changing the state (again we
1468 * have the rtnl) and in the latter case, do_reset() will detect and
1469 * honor our setting below.
1471 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1472 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1473 adapter_state_to_string(adapter->state),
1474 adapter->failover_pending);
1475 adapter->state = VNIC_OPEN;
1480 if (adapter->state != VNIC_CLOSED) {
1481 rc = ibmvnic_login(netdev);
1485 rc = init_resources(adapter);
1487 netdev_err(netdev, "failed to initialize resources\n");
1492 rc = __ibmvnic_open(netdev);
1495 /* If open failed and there is a pending failover or in-progress reset,
1496 * set device state and return. Device operation will be handled by
1497 * reset routine. See also comments above regarding rtnl.
1500 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1501 adapter->state = VNIC_OPEN;
1506 release_resources(adapter);
1507 release_rx_pools(adapter);
1508 release_tx_pools(adapter);
1514 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1516 struct ibmvnic_rx_pool *rx_pool;
1517 struct ibmvnic_rx_buff *rx_buff;
1522 if (!adapter->rx_pool)
1525 rx_scrqs = adapter->num_active_rx_pools;
1526 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1528 /* Free any remaining skbs in the rx buffer pools */
1529 for (i = 0; i < rx_scrqs; i++) {
1530 rx_pool = &adapter->rx_pool[i];
1531 if (!rx_pool || !rx_pool->rx_buff)
1534 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1535 for (j = 0; j < rx_entries; j++) {
1536 rx_buff = &rx_pool->rx_buff[j];
1537 if (rx_buff && rx_buff->skb) {
1538 dev_kfree_skb_any(rx_buff->skb);
1539 rx_buff->skb = NULL;
1545 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1546 struct ibmvnic_tx_pool *tx_pool)
1548 struct ibmvnic_tx_buff *tx_buff;
1552 if (!tx_pool || !tx_pool->tx_buff)
1555 tx_entries = tx_pool->num_buffers;
1557 for (i = 0; i < tx_entries; i++) {
1558 tx_buff = &tx_pool->tx_buff[i];
1559 if (tx_buff && tx_buff->skb) {
1560 dev_kfree_skb_any(tx_buff->skb);
1561 tx_buff->skb = NULL;
1566 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1571 if (!adapter->tx_pool || !adapter->tso_pool)
1574 tx_scrqs = adapter->num_active_tx_pools;
1576 /* Free any remaining skbs in the tx buffer pools */
1577 for (i = 0; i < tx_scrqs; i++) {
1578 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1579 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1580 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1584 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1586 struct net_device *netdev = adapter->netdev;
1589 if (adapter->tx_scrq) {
1590 for (i = 0; i < adapter->req_tx_queues; i++)
1591 if (adapter->tx_scrq[i]->irq) {
1593 "Disabling tx_scrq[%d] irq\n", i);
1594 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1595 disable_irq(adapter->tx_scrq[i]->irq);
1599 if (adapter->rx_scrq) {
1600 for (i = 0; i < adapter->req_rx_queues; i++) {
1601 if (adapter->rx_scrq[i]->irq) {
1603 "Disabling rx_scrq[%d] irq\n", i);
1604 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1605 disable_irq(adapter->rx_scrq[i]->irq);
1611 static void ibmvnic_cleanup(struct net_device *netdev)
1613 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1615 /* ensure that transmissions are stopped if called by do_reset */
1617 adapter->tx_queues_active = false;
1619 /* Ensure ibmvnic_complete_tx() and ibmvnic_xmit() see ->tx_queues_active
1620 * update so they don't restart a queue after we stop it below.
1624 if (test_bit(0, &adapter->resetting))
1625 netif_tx_disable(netdev);
1627 netif_tx_stop_all_queues(netdev);
1629 ibmvnic_napi_disable(adapter);
1630 ibmvnic_disable_irqs(adapter);
1633 static int __ibmvnic_close(struct net_device *netdev)
1635 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1638 adapter->state = VNIC_CLOSING;
1639 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1640 adapter->state = VNIC_CLOSED;
1644 static int ibmvnic_close(struct net_device *netdev)
1646 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1649 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1650 adapter_state_to_string(adapter->state),
1651 adapter->failover_pending,
1652 adapter->force_reset_recovery);
1654 /* If device failover is pending, just set device state and return.
1655 * Device operation will be handled by reset routine.
1657 if (adapter->failover_pending) {
1658 adapter->state = VNIC_CLOSED;
1662 rc = __ibmvnic_close(netdev);
1663 ibmvnic_cleanup(netdev);
1664 clean_rx_pools(adapter);
1665 clean_tx_pools(adapter);
1671 * build_hdr_data - creates L2/L3/L4 header data buffer
1672 * @hdr_field: bitfield determining needed headers
1673 * @skb: socket buffer
1674 * @hdr_len: array of header lengths
1675 * @hdr_data: buffer to write the header to
1677 * Reads hdr_field to determine which headers are needed by firmware.
1678 * Builds a buffer containing these headers. Saves individual header
1679 * lengths and total buffer length to be used to build descriptors.
1681 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1682 int *hdr_len, u8 *hdr_data)
1687 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1688 hdr_len[0] = sizeof(struct vlan_ethhdr);
1690 hdr_len[0] = sizeof(struct ethhdr);
1692 if (skb->protocol == htons(ETH_P_IP)) {
1693 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1694 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1695 hdr_len[2] = tcp_hdrlen(skb);
1696 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1697 hdr_len[2] = sizeof(struct udphdr);
1698 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1699 hdr_len[1] = sizeof(struct ipv6hdr);
1700 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1701 hdr_len[2] = tcp_hdrlen(skb);
1702 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1703 hdr_len[2] = sizeof(struct udphdr);
1704 } else if (skb->protocol == htons(ETH_P_ARP)) {
1705 hdr_len[1] = arp_hdr_len(skb->dev);
1709 memset(hdr_data, 0, 120);
1710 if ((hdr_field >> 6) & 1) {
1711 hdr = skb_mac_header(skb);
1712 memcpy(hdr_data, hdr, hdr_len[0]);
1716 if ((hdr_field >> 5) & 1) {
1717 hdr = skb_network_header(skb);
1718 memcpy(hdr_data + len, hdr, hdr_len[1]);
1722 if ((hdr_field >> 4) & 1) {
1723 hdr = skb_transport_header(skb);
1724 memcpy(hdr_data + len, hdr, hdr_len[2]);
1731 * create_hdr_descs - create header and header extension descriptors
1732 * @hdr_field: bitfield determining needed headers
1733 * @hdr_data: buffer containing header data
1734 * @len: length of data buffer
1735 * @hdr_len: array of individual header lengths
1736 * @scrq_arr: descriptor array
1738 * Creates header and, if needed, header extension descriptors and
1739 * places them in a descriptor array, scrq_arr
1742 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1743 union sub_crq *scrq_arr)
1745 union sub_crq hdr_desc;
1751 while (tmp_len > 0) {
1752 cur = hdr_data + len - tmp_len;
1754 memset(&hdr_desc, 0, sizeof(hdr_desc));
1755 if (cur != hdr_data) {
1756 data = hdr_desc.hdr_ext.data;
1757 tmp = tmp_len > 29 ? 29 : tmp_len;
1758 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1759 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1760 hdr_desc.hdr_ext.len = tmp;
1762 data = hdr_desc.hdr.data;
1763 tmp = tmp_len > 24 ? 24 : tmp_len;
1764 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1765 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1766 hdr_desc.hdr.len = tmp;
1767 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1768 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1769 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1770 hdr_desc.hdr.flag = hdr_field << 1;
1772 memcpy(data, cur, tmp);
1774 *scrq_arr = hdr_desc;
1783 * build_hdr_descs_arr - build a header descriptor array
1784 * @skb: tx socket buffer
1785 * @indir_arr: indirect array
1786 * @num_entries: number of descriptors to be sent
1787 * @hdr_field: bit field determining which headers will be sent
1789 * This function will build a TX descriptor array with applicable
1790 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1793 static void build_hdr_descs_arr(struct sk_buff *skb,
1794 union sub_crq *indir_arr,
1795 int *num_entries, u8 hdr_field)
1797 int hdr_len[3] = {0, 0, 0};
1798 u8 hdr_data[140] = {0};
1801 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1803 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1807 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1808 struct net_device *netdev)
1810 /* For some backing devices, mishandling of small packets
1811 * can result in a loss of connection or TX stall. Device
1812 * architects recommend that no packet should be smaller
1813 * than the minimum MTU value provided to the driver, so
1814 * pad any packets to that length
1816 if (skb->len < netdev->min_mtu)
1817 return skb_put_padto(skb, netdev->min_mtu);
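/* Unwind TX descriptors still sitting in the sub-CRQ's indirect
 * buffer after a failed flush: return each buffer to its pool's free
 * map, drop the associated skb, back out the TX packet/byte counters,
 * and wake the subqueue if it was stopped and now has room.
 */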
1822 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1823 struct ibmvnic_sub_crq_queue *tx_scrq)
1825 struct ibmvnic_ind_xmit_queue *ind_bufp;
1826 struct ibmvnic_tx_buff *tx_buff;
1827 struct ibmvnic_tx_pool *tx_pool;
1828 union sub_crq tx_scrq_entry;
1834 ind_bufp = &tx_scrq->ind_buf;
1835 entries = (u64)ind_bufp->index;
1836 queue_num = tx_scrq->pool_index;
1838 for (i = entries - 1; i >= 0; --i) {
1839 tx_scrq_entry = ind_bufp->indir_arr[i];
1840 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1842 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1843 if (index & IBMVNIC_TSO_POOL_MASK) {
1844 tx_pool = &adapter->tso_pool[queue_num];
1845 index &= ~IBMVNIC_TSO_POOL_MASK;
1847 tx_pool = &adapter->tx_pool[queue_num];
1849 tx_pool->free_map[tx_pool->consumer_index] = index;
1850 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1851 tx_pool->num_buffers - 1 :
1852 tx_pool->consumer_index - 1;
1853 tx_buff = &tx_pool->tx_buff[index];
1854 adapter->netdev->stats.tx_packets--;
1855 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1856 adapter->tx_stats_buffers[queue_num].packets--;
1857 adapter->tx_stats_buffers[queue_num].bytes -=
1859 dev_kfree_skb_any(tx_buff->skb);
1860 tx_buff->skb = NULL;
1861 adapter->netdev->stats.tx_dropped++;
1864 ind_bufp->index = 0;
1866 if (atomic_sub_return(entries, &tx_scrq->used) <=
1867 (adapter->req_tx_entries_per_subcrq / 2) &&
1868 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
1871 if (adapter->tx_queues_active) {
1872 netif_wake_subqueue(adapter->netdev, queue_num);
1873 netdev_dbg(adapter->netdev, "Started queue %d\n",
1881 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1882 struct ibmvnic_sub_crq_queue *tx_scrq)
1884 struct ibmvnic_ind_xmit_queue *ind_bufp;
1890 ind_bufp = &tx_scrq->ind_buf;
1891 dma_addr = (u64)ind_bufp->indir_dma;
1892 entries = (u64)ind_bufp->index;
1893 handle = tx_scrq->handle;
1897 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1899 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1901 ind_bufp->index = 0;
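/* Main transmit routine. The skb (linear data plus any frags) is
 * copied into a slot of the TX pool's long term buffer, a v1 TX
 * descriptor is built for it (plus optional header descriptors for
 * LSO), and the descriptors are batched in the sub-CRQ's indirect
 * buffer. The batch is flushed to the VIOS when it fills up or when
 * no further packets are pending (netdev_xmit_more()).
 */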
1905 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1907 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1908 int queue_num = skb_get_queue_mapping(skb);
1909 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1910 struct device *dev = &adapter->vdev->dev;
1911 struct ibmvnic_ind_xmit_queue *ind_bufp;
1912 struct ibmvnic_tx_buff *tx_buff = NULL;
1913 struct ibmvnic_sub_crq_queue *tx_scrq;
1914 struct ibmvnic_tx_pool *tx_pool;
1915 unsigned int tx_send_failed = 0;
1916 netdev_tx_t ret = NETDEV_TX_OK;
1917 unsigned int tx_map_failed = 0;
1918 union sub_crq indir_arr[16];
1919 unsigned int tx_dropped = 0;
1920 unsigned int tx_packets = 0;
1921 unsigned int tx_bytes = 0;
1922 dma_addr_t data_dma_addr;
1923 struct netdev_queue *txq;
1924 unsigned long lpar_rc;
1925 union sub_crq tx_crq;
1926 unsigned int offset;
1927 int num_entries = 1;
1932 /* If a reset is in progress, drop the packet since
1933 * the scrqs may get torn down. Otherwise use the
1934 * rcu to ensure reset waits for us to complete.
1937 if (!adapter->tx_queues_active) {
1938 dev_kfree_skb_any(skb);
1946 tx_scrq = adapter->tx_scrq[queue_num];
1947 txq = netdev_get_tx_queue(netdev, queue_num);
1948 ind_bufp = &tx_scrq->ind_buf;
1950 if (ibmvnic_xmit_workarounds(skb, netdev)) {
1954 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1958 if (skb_is_gso(skb))
1959 tx_pool = &adapter->tso_pool[queue_num];
1961 tx_pool = &adapter->tx_pool[queue_num];
1963 index = tx_pool->free_map[tx_pool->consumer_index];
1965 if (index == IBMVNIC_INVALID_MAP) {
1966 dev_kfree_skb_any(skb);
1969 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1974 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1976 offset = index * tx_pool->buf_size;
1977 dst = tx_pool->long_term_buff.buff + offset;
1978 memset(dst, 0, tx_pool->buf_size);
1979 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1981 if (skb_shinfo(skb)->nr_frags) {
1985 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1986 cur = skb_headlen(skb);
1988 /* Copy the frags */
1989 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1990 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1992 memcpy(dst + cur, skb_frag_address(frag),
1993 skb_frag_size(frag));
1994 cur += skb_frag_size(frag);
1997 skb_copy_from_linear_data(skb, dst, skb->len);
2000 /* post changes to long_term_buff *dst before the VIOS accesses it */
2003 tx_pool->consumer_index =
2004 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
2006 tx_buff = &tx_pool->tx_buff[index];
2008 tx_buff->index = index;
2009 tx_buff->pool_index = queue_num;
2011 memset(&tx_crq, 0, sizeof(tx_crq));
2012 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
2013 tx_crq.v1.type = IBMVNIC_TX_DESC;
2014 tx_crq.v1.n_crq_elem = 1;
2015 tx_crq.v1.n_sge = 1;
2016 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
2018 if (skb_is_gso(skb))
2019 tx_crq.v1.correlator =
2020 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
2022 tx_crq.v1.correlator = cpu_to_be32(index);
2023 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
2024 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
2025 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
2027 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
2028 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
2029 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
2032 if (skb->protocol == htons(ETH_P_IP)) {
2033 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2034 proto = ip_hdr(skb)->protocol;
2035 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2036 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2037 proto = ipv6_hdr(skb)->nexthdr;
2040 if (proto == IPPROTO_TCP)
2041 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2042 else if (proto == IPPROTO_UDP)
2043 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2045 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2046 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
2049 if (skb_is_gso(skb)) {
2050 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2051 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2055 if ((*hdrs >> 7) & 1)
2056 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
2058 tx_crq.v1.n_crq_elem = num_entries;
2059 tx_buff->num_entries = num_entries;
2060 /* flush buffer if the current entry cannot fit */
2061 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2062 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2063 if (lpar_rc != H_SUCCESS)
2067 indir_arr[0] = tx_crq;
2068 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
2069 num_entries * sizeof(struct ibmvnic_generic_scrq));
2070 ind_bufp->index += num_entries;
2071 if (__netdev_tx_sent_queue(txq, skb->len,
2072 netdev_xmit_more() &&
2073 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2074 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2075 if (lpar_rc != H_SUCCESS)
2079 if (atomic_add_return(num_entries, &tx_scrq->used)
2080 >= adapter->req_tx_entries_per_subcrq) {
2081 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
2082 netif_stop_subqueue(netdev, queue_num);
2086 tx_bytes += skb->len;
2087 txq_trans_cond_update(txq);
2092 dev_kfree_skb_any(skb);
2093 tx_buff->skb = NULL;
2094 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2095 tx_pool->num_buffers - 1 :
2096 tx_pool->consumer_index - 1;
2099 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2100 dev_err_ratelimited(dev, "tx: send failed\n");
2102 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2103 /* Disable TX and report carrier off if queue is closed
2104 * or pending failover.
2105 * Firmware guarantees that a signal will be sent to the
2106 * driver, triggering a reset or some other action.
2108 netif_tx_stop_all_queues(netdev);
2109 netif_carrier_off(netdev);
2113 netdev->stats.tx_dropped += tx_dropped;
2114 netdev->stats.tx_bytes += tx_bytes;
2115 netdev->stats.tx_packets += tx_packets;
2116 adapter->tx_send_failed += tx_send_failed;
2117 adapter->tx_map_failed += tx_map_failed;
2118 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2119 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2120 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
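/* ndo_set_rx_mode handler: update multicast filtering on the VNIC
 * server using MULTICAST_CTRL CRQs (enable all, disable all, or one
 * enable message per address in the device's multicast list).
 */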
2125 static void ibmvnic_set_multi(struct net_device *netdev)
2127 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2128 struct netdev_hw_addr *ha;
2129 union ibmvnic_crq crq;
2131 memset(&crq, 0, sizeof(crq));
2132 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2133 crq.request_capability.cmd = REQUEST_CAPABILITY;
2135 if (netdev->flags & IFF_PROMISC) {
2136 if (!adapter->promisc_supported)
2139 if (netdev->flags & IFF_ALLMULTI) {
2140 /* Accept all multicast */
2141 memset(&crq, 0, sizeof(crq));
2142 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2143 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2144 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2145 ibmvnic_send_crq(adapter, &crq);
2146 } else if (netdev_mc_empty(netdev)) {
2147 /* Reject all multicast */
2148 memset(&crq, 0, sizeof(crq));
2149 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2150 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2151 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2152 ibmvnic_send_crq(adapter, &crq);
2154 /* Accept one or more multicast(s) */
2155 netdev_for_each_mc_addr(ha, netdev) {
2156 memset(&crq, 0, sizeof(crq));
2157 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2158 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2159 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2160 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2162 ibmvnic_send_crq(adapter, &crq);
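/* Send a CHANGE_MAC_ADDR CRQ for the given address and wait for the
 * response; netdev->dev_addr itself is updated by the response
 * handler (see handle_change_mac_rsp).
 */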
2168 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
2170 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2171 union ibmvnic_crq crq;
2174 if (!is_valid_ether_addr(dev_addr)) {
2175 rc = -EADDRNOTAVAIL;
2179 memset(&crq, 0, sizeof(crq));
2180 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2181 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
2182 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
2184 mutex_lock(&adapter->fw_lock);
2185 adapter->fw_done_rc = 0;
2186 reinit_completion(&adapter->fw_done);
2188 rc = ibmvnic_send_crq(adapter, &crq);
2191 mutex_unlock(&adapter->fw_lock);
2195 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2196 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
2197 if (rc || adapter->fw_done_rc) {
2199 mutex_unlock(&adapter->fw_lock);
2202 mutex_unlock(&adapter->fw_lock);
2205 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2209 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2211 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2212 struct sockaddr *addr = p;
2216 if (!is_valid_ether_addr(addr->sa_data))
2217 return -EADDRNOTAVAIL;
2219 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2220 if (adapter->state != VNIC_PROBED)
2221 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2226 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2229 case VNIC_RESET_FAILOVER:
2231 case VNIC_RESET_MOBILITY:
2233 case VNIC_RESET_FATAL:
2235 case VNIC_RESET_NON_FATAL:
2237 case VNIC_RESET_TIMEOUT:
2239 case VNIC_RESET_CHANGE_PARAM:
2240 return "CHANGE_PARAM";
2241 case VNIC_RESET_PASSIVE_INIT:
2242 return "PASSIVE_INIT";
2248 * Initialize the init_done completion and return code values. We
2249 * can get a transport event just after registering the CRQ and the
2250 * tasklet will use this to communicate the transport event. To ensure
2251 * we don't miss the notification/error, initialize these _before_
2252  * registering the CRQ.
2254 static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
2256 reinit_completion(&adapter->init_done);
2257 adapter->init_done_rc = 0;
2261 * do_reset returns zero if we are able to keep processing reset events, or
2262 * non-zero if we hit a fatal error and must halt.
2264 static int do_reset(struct ibmvnic_adapter *adapter,
2265 struct ibmvnic_rwi *rwi, u32 reset_state)
2267 struct net_device *netdev = adapter->netdev;
2268 u64 old_num_rx_queues, old_num_tx_queues;
2269 u64 old_num_rx_slots, old_num_tx_slots;
2272 netdev_dbg(adapter->netdev,
2273 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2274 adapter_state_to_string(adapter->state),
2275 adapter->failover_pending,
2276 reset_reason_to_string(rwi->reset_reason),
2277 adapter_state_to_string(reset_state));
2279 adapter->reset_reason = rwi->reset_reason;
2280 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2281 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2284 /* Now that we have the rtnl lock, clear any pending failover.
2285 * This will ensure ibmvnic_open() has either completed or will
2286 * block until failover is complete.
2288 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2289 adapter->failover_pending = false;
2291 /* read the state and check (again) after getting rtnl */
2292 reset_state = adapter->state;
2294 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2299 netif_carrier_off(netdev);
2301 old_num_rx_queues = adapter->req_rx_queues;
2302 old_num_tx_queues = adapter->req_tx_queues;
2303 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2304 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2306 ibmvnic_cleanup(netdev);
2308 if (reset_state == VNIC_OPEN &&
2309 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2310 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2311 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2312 rc = __ibmvnic_close(netdev);
2316 adapter->state = VNIC_CLOSING;
2318 /* Release the RTNL lock before link state change and
2319 * re-acquire after the link state change to allow
2320 * linkwatch_event to grab the RTNL lock and run during a reset.
2324 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2329 if (adapter->state == VNIC_OPEN) {
2330 /* When we dropped rtnl, ibmvnic_open() got
2331 * it and noticed that we are resetting and
2332 * set the adapter state to OPEN. Update our
2333 * new "target" state, and resume the reset
2334 * from VNIC_CLOSING state.
2337 "Open changed state from %s, updating.\n",
2338 adapter_state_to_string(reset_state));
2339 reset_state = VNIC_OPEN;
2340 adapter->state = VNIC_CLOSING;
2343 if (adapter->state != VNIC_CLOSING) {
2344 /* If someone else changed the adapter state
2345 * when we dropped the rtnl, fail the reset
2350 adapter->state = VNIC_CLOSED;
2354 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2355 release_resources(adapter);
2356 release_sub_crqs(adapter, 1);
2357 release_crq_queue(adapter);
2360 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2361 /* remove the closed state so when we call open it appears
2362 * we are coming from the probed state.
2364 adapter->state = VNIC_PROBED;
2366 reinit_init_done(adapter);
2368 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2369 rc = init_crq_queue(adapter);
2370 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2371 rc = ibmvnic_reenable_crq_queue(adapter);
2372 release_sub_crqs(adapter, 1);
2374 rc = ibmvnic_reset_crq(adapter);
2375 if (rc == H_CLOSED || rc == H_SUCCESS) {
2376 rc = vio_enable_interrupts(adapter->vdev);
2378 netdev_err(adapter->netdev,
2379 "Reset failed to enable interrupts. rc=%d\n",
2385 netdev_err(adapter->netdev,
2386 "Reset couldn't initialize crq. rc=%d\n", rc);
2390 rc = ibmvnic_reset_init(adapter, true);
2394 /* If the adapter was in PROBE or DOWN state prior to the reset, exit here. */
2397 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2402 rc = ibmvnic_login(netdev);
2406 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2407 rc = init_resources(adapter);
2410 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2411 adapter->req_tx_queues != old_num_tx_queues ||
2412 adapter->req_rx_add_entries_per_subcrq !=
2414 adapter->req_tx_entries_per_subcrq !=
2416 !adapter->rx_pool ||
2417 !adapter->tso_pool ||
2418 !adapter->tx_pool) {
2419 release_napi(adapter);
2420 release_vpd_data(adapter);
2422 rc = init_resources(adapter);
2427 rc = init_tx_pools(netdev);
2430 "init tx pools failed (%d)\n",
2435 rc = init_rx_pools(netdev);
2438 "init rx pools failed (%d)\n",
2443 ibmvnic_disable_irqs(adapter);
2445 adapter->state = VNIC_CLOSED;
2447 if (reset_state == VNIC_CLOSED) {
2452 rc = __ibmvnic_open(netdev);
2454 rc = IBMVNIC_OPEN_FAILED;
2458 /* refresh device's multicast list */
2459 ibmvnic_set_multi(netdev);
2461 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2462 adapter->reset_reason == VNIC_RESET_MOBILITY)
2463 __netdev_notify_peers(netdev);
2468 /* restore the adapter state if reset failed */
2470 adapter->state = reset_state;
2471 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2472 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2475 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2476 adapter_state_to_string(adapter->state),
2477 adapter->failover_pending, rc);
2481 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2482 struct ibmvnic_rwi *rwi, u32 reset_state)
2484 struct net_device *netdev = adapter->netdev;
2487 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2488 reset_reason_to_string(rwi->reset_reason));
2490 /* read the state and check (again) after getting rtnl */
2491 reset_state = adapter->state;
2493 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2498 netif_carrier_off(netdev);
2499 adapter->reset_reason = rwi->reset_reason;
2501 ibmvnic_cleanup(netdev);
2502 release_resources(adapter);
2503 release_sub_crqs(adapter, 0);
2504 release_crq_queue(adapter);
2506 /* remove the closed state so when we call open it appears
2507 * we are coming from the probed state.
2509 adapter->state = VNIC_PROBED;
2511 reinit_init_done(adapter);
2513 rc = init_crq_queue(adapter);
2515 netdev_err(adapter->netdev,
2516 "Couldn't initialize crq. rc=%d\n", rc);
2520 rc = ibmvnic_reset_init(adapter, false);
2524 /* If the adapter was in PROBE or DOWN state prior to the reset, exit here. */
2527 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
2530 rc = ibmvnic_login(netdev);
2534 rc = init_resources(adapter);
2538 ibmvnic_disable_irqs(adapter);
2539 adapter->state = VNIC_CLOSED;
2541 if (reset_state == VNIC_CLOSED)
2544 rc = __ibmvnic_open(netdev);
2546 rc = IBMVNIC_OPEN_FAILED;
2550 __netdev_notify_peers(netdev);
2552 /* restore adapter state if reset failed */
2554 adapter->state = reset_state;
2555 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2556 adapter_state_to_string(adapter->state),
2557 adapter->failover_pending, rc);
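/* Pop the next reset work item off ->rwi_list under rwi_lock, or return
 * NULL if the queue is empty.
 */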
2561 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2563 struct ibmvnic_rwi *rwi;
2564 unsigned long flags;
2566 spin_lock_irqsave(&adapter->rwi_lock, flags);
2568 if (!list_empty(&adapter->rwi_list)) {
2569 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2571 list_del(&rwi->list);
2576 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2581 * do_passive_init - complete probing when partner device is detected.
2582 * @adapter: ibmvnic_adapter struct
2584 * If the ibmvnic device does not have a partner device to communicate with at boot
2585 * and that partner device comes online at a later time, this function is called
2586 * to complete the initialization process of the ibmvnic device.
2587 * Caller is expected to hold rtnl_lock().
2589 * Returns non-zero if sub-CRQs are not initialized properly leaving the device
2590 * in the down state.
2591 * Returns 0 upon success and the device is in PROBED state.
2594 static int do_passive_init(struct ibmvnic_adapter *adapter)
2596 unsigned long timeout = msecs_to_jiffies(30000);
2597 struct net_device *netdev = adapter->netdev;
2598 struct device *dev = &adapter->vdev->dev;
2601 netdev_dbg(netdev, "Partner device found, probing.\n");
2603 adapter->state = VNIC_PROBING;
2604 reinit_completion(&adapter->init_done);
2605 adapter->init_done_rc = 0;
2606 adapter->crq.active = true;
2608 rc = send_crq_init_complete(adapter);
2612 rc = send_version_xchg(adapter);
2614 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2616 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2617 dev_err(dev, "Initialization sequence timed out\n");
2622 rc = init_sub_crqs(adapter);
2624 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2628 rc = init_sub_crq_irqs(adapter);
2630 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
2634 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2635 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2636 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2638 adapter->state = VNIC_PROBED;
2639 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2644 release_sub_crqs(adapter, 1);
2646 adapter->state = VNIC_DOWN;
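/* Reset worker: drain the reset work queue, handling each entry with
 * do_passive_init(), do_hard_reset() or do_reset() as appropriate, and
 * retrying a failed reset as a hard reset when nothing else is queued.
 */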
2650 static void __ibmvnic_reset(struct work_struct *work)
2652 struct ibmvnic_adapter *adapter;
2653 unsigned int timeout = 5000;
2654 struct ibmvnic_rwi *tmprwi;
2655 bool saved_state = false;
2656 struct ibmvnic_rwi *rwi;
2657 unsigned long flags;
2664 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2665 dev = &adapter->vdev->dev;
2667 /* Wait for ibmvnic_probe() to complete. If probe is taking too long
2668 * or if another reset is in progress, defer work for now. If probe
2669 * eventually fails it will flush and terminate our work.
2671 * Three possibilities here:
2672 * 1. Adapter being removed - just return
2673 * 2. Timed out on probe or another reset in progress - delay the work
2674 * 3. Completed probe - perform any resets in queue
2676 if (adapter->state == VNIC_PROBING &&
2677 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
2678 dev_err(dev, "Reset thread timed out on probe");
2679 queue_delayed_work(system_long_wq,
2680 &adapter->ibmvnic_delayed_reset,
2681 IBMVNIC_RESET_DELAY);
2685 /* adapter is done with probe (i.e. state is never VNIC_PROBING now) */
2686 if (adapter->state == VNIC_REMOVING)
2689 /* ->rwi_list is stable now (no one else is removing entries) */
2691 /* ibmvnic_probe() may have purged the reset queue after we were
2692 * scheduled to process a reset so there may be no resets to process.
2693 * Before setting the ->resetting bit though, we have to make sure
2694 * that there is in fact a reset to process. Otherwise we may race
2695 * with ibmvnic_open() and end up leaving the vnic down:
2697 * __ibmvnic_reset() ibmvnic_open()
2698 * ----------------- --------------
2700 * set ->resetting bit
2701 * find ->resetting bit is set
2702 * set ->state to IBMVNIC_OPEN (i.e
2703 * assume reset will open device)
2705 * find reset queue empty
2708 * Neither performed vnic login/open and vnic stays down
2710 * If we hold the lock and conditionally set the bit, either we
2711 * or ibmvnic_open() will complete the open.
2714 spin_lock(&adapter->rwi_lock);
2715 if (!list_empty(&adapter->rwi_list)) {
2716 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2717 queue_delayed_work(system_long_wq,
2718 &adapter->ibmvnic_delayed_reset,
2719 IBMVNIC_RESET_DELAY);
2724 spin_unlock(&adapter->rwi_lock);
2729 rwi = get_next_rwi(adapter);
2731 spin_lock_irqsave(&adapter->state_lock, flags);
2733 if (adapter->state == VNIC_REMOVING ||
2734 adapter->state == VNIC_REMOVED) {
2735 spin_unlock_irqrestore(&adapter->state_lock, flags);
2742 reset_state = adapter->state;
2745 spin_unlock_irqrestore(&adapter->state_lock, flags);
2747 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2749 rc = do_passive_init(adapter);
2752 netif_carrier_on(adapter->netdev);
2753 } else if (adapter->force_reset_recovery) {
2754 /* Since we are doing a hard reset now, clear the
2755 * failover_pending flag so we don't ignore any
2756 * future MOBILITY or other resets.
2758 adapter->failover_pending = false;
2760 /* Transport event occurred during previous reset */
2761 if (adapter->wait_for_reset) {
2762 /* Previous was CHANGE_PARAM; caller locked */
2763 adapter->force_reset_recovery = false;
2764 rc = do_hard_reset(adapter, rwi, reset_state);
2767 adapter->force_reset_recovery = false;
2768 rc = do_hard_reset(adapter, rwi, reset_state);
2776 /* If auto-priority-failover is enabled we can get
2777 * back to back failovers during resets, resulting
2778 * in at least two failed resets (from high-priority
2779 * backing device to low-priority one and then back).
2780 * If resets continue to fail beyond that, give the
2781 * adapter some time to settle down before retrying.
2783 if (num_fails >= 3) {
2784 netdev_dbg(adapter->netdev,
2785 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2786 adapter_state_to_string(adapter->state),
2788 set_current_state(TASK_UNINTERRUPTIBLE);
2789 schedule_timeout(60 * HZ);
2792 rc = do_reset(adapter, rwi, reset_state);
2795 adapter->last_reset_time = jiffies;
2798 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2800 rwi = get_next_rwi(adapter);
2803 * If there is another reset queued, free the previous rwi
2804 * and process the new reset even if previous reset failed
2805 * (the previous reset could have failed because of a fail
2806 * over for instance, so process the fail over).
2808 * If there are no resets queued and the previous reset failed,
2809 * the adapter would be in an undefined state. So retry the
2810 * previous reset as a hard reset.
2817 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2818 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
2819 adapter->force_reset_recovery = true;
2822 if (adapter->wait_for_reset) {
2823 adapter->reset_done_rc = rc;
2824 complete(&adapter->reset_done);
2827 clear_bit_unlock(0, &adapter->resetting);
2829 netdev_dbg(adapter->netdev,
2830 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2831 adapter_state_to_string(adapter->state),
2832 adapter->force_reset_recovery,
2833 adapter->wait_for_reset);
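/* Delayed-work entry point: simply re-runs __ibmvnic_reset() once the
 * deferral period has elapsed.
 */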
2836 static void __ibmvnic_delayed_reset(struct work_struct *work)
2838 struct ibmvnic_adapter *adapter;
2840 adapter = container_of(work, struct ibmvnic_adapter,
2841 ibmvnic_delayed_reset.work);
2842 __ibmvnic_reset(&adapter->ibmvnic_reset);
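/* Free every reset work item still queued on ->rwi_list. */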
2845 static void flush_reset_queue(struct ibmvnic_adapter *adapter)
2847 struct list_head *entry, *tmp_entry;
2849 if (!list_empty(&adapter->rwi_list)) {
2850 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
2852 kfree(list_entry(entry, struct ibmvnic_rwi, list));
2857 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2858 enum ibmvnic_reset_reason reason)
2860 struct net_device *netdev = adapter->netdev;
2861 struct ibmvnic_rwi *rwi, *tmp;
2862 unsigned long flags;
2865 spin_lock_irqsave(&adapter->rwi_lock, flags);
2867 /* If failover is pending don't schedule any other reset.
2868 * Instead let the failover complete. If there is already a
2869 * failover reset scheduled, we will detect and drop the
2870 * duplicate reset when walking the ->rwi_list below.
2872 if (adapter->state == VNIC_REMOVING ||
2873 adapter->state == VNIC_REMOVED ||
2874 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2876 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2880 list_for_each_entry(tmp, &adapter->rwi_list, list) {
2881 if (tmp->reset_reason == reason) {
2882 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2883 reset_reason_to_string(reason));
2889 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2894 /* if we just received a transport event,
2895 * flush reset queue and process this reset
2897 if (adapter->force_reset_recovery)
2898 flush_reset_queue(adapter);
2900 rwi->reset_reason = reason;
2901 list_add_tail(&rwi->list, &adapter->rwi_list);
2902 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2903 reset_reason_to_string(reason));
2904 queue_work(system_long_wq, &adapter->ibmvnic_reset);
2908 /* ibmvnic_close() below can block, so drop the lock first */
2909 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2912 ibmvnic_close(netdev);
2917 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2919 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2921 if (test_bit(0, &adapter->resetting)) {
2922 netdev_err(adapter->netdev,
2923 "Adapter is resetting, skip timeout reset\n");
2926 /* No queuing up reset until at least 5 seconds (default watchdog val) after the last reset */
2929 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2930 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2933 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
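/* Return a completed rx buffer's slot to its pool's free map and drop the
 * pool's available count.
 */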
2936 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2937 struct ibmvnic_rx_buff *rx_buff)
2939 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2941 rx_buff->skb = NULL;
2943 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2944 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2946 atomic_dec(&pool->available);
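/* NAPI poll handler: drain up to @budget completions from the rx sub-CRQ,
 * pass the skbs up with napi_gro_receive(), replenish the rx pool when it
 * runs low, and re-enable the queue interrupt once the budget is no longer
 * exhausted.
 */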
2949 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2951 struct ibmvnic_sub_crq_queue *rx_scrq;
2952 struct ibmvnic_adapter *adapter;
2953 struct net_device *netdev;
2954 int frames_processed;
2958 adapter = netdev_priv(netdev);
2959 scrq_num = (int)(napi - adapter->napi);
2960 frames_processed = 0;
2961 rx_scrq = adapter->rx_scrq[scrq_num];
2964 while (frames_processed < budget) {
2965 struct sk_buff *skb;
2966 struct ibmvnic_rx_buff *rx_buff;
2967 union sub_crq *next;
2972 if (unlikely(test_bit(0, &adapter->resetting) &&
2973 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2974 enable_scrq_irq(adapter, rx_scrq);
2975 napi_complete_done(napi, frames_processed);
2976 return frames_processed;
2979 if (!pending_scrq(adapter, rx_scrq))
2981 next = ibmvnic_next_scrq(adapter, rx_scrq);
2982 rx_buff = (struct ibmvnic_rx_buff *)
2983 be64_to_cpu(next->rx_comp.correlator);
2984 /* do error checking */
2985 if (next->rx_comp.rc) {
2986 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2987 be16_to_cpu(next->rx_comp.rc));
2988 /* free the entry */
2989 next->rx_comp.first = 0;
2990 dev_kfree_skb_any(rx_buff->skb);
2991 remove_buff_from_pool(adapter, rx_buff);
2993 } else if (!rx_buff->skb) {
2994 /* free the entry */
2995 next->rx_comp.first = 0;
2996 remove_buff_from_pool(adapter, rx_buff);
3000 length = be32_to_cpu(next->rx_comp.len);
3001 offset = be16_to_cpu(next->rx_comp.off_frame_data);
3002 flags = next->rx_comp.flags;
3004 /* load long_term_buff before copying to skb */
3006 skb_copy_to_linear_data(skb, rx_buff->data + offset,
3009 /* VLAN Header has been stripped by the system firmware and
3010 * needs to be inserted by the driver
3012 if (adapter->rx_vlan_header_insertion &&
3013 (flags & IBMVNIC_VLAN_STRIPPED))
3014 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3015 ntohs(next->rx_comp.vlan_tci));
3017 /* free the entry */
3018 next->rx_comp.first = 0;
3019 remove_buff_from_pool(adapter, rx_buff);
3021 skb_put(skb, length);
3022 skb->protocol = eth_type_trans(skb, netdev);
3023 skb_record_rx_queue(skb, scrq_num);
3025 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
3026 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
3027 skb->ip_summed = CHECKSUM_UNNECESSARY;
3031 napi_gro_receive(napi, skb); /* send it up */
3032 netdev->stats.rx_packets++;
3033 netdev->stats.rx_bytes += length;
3034 adapter->rx_stats_buffers[scrq_num].packets++;
3035 adapter->rx_stats_buffers[scrq_num].bytes += length;
3039 if (adapter->state != VNIC_CLOSING &&
3040 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
3041 adapter->req_rx_add_entries_per_subcrq / 2) ||
3042 frames_processed < budget))
3043 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
3044 if (frames_processed < budget) {
3045 if (napi_complete_done(napi, frames_processed)) {
3046 enable_scrq_irq(adapter, rx_scrq);
3047 if (pending_scrq(adapter, rx_scrq)) {
3048 if (napi_reschedule(napi)) {
3049 disable_scrq_irq(adapter, rx_scrq);
3055 return frames_processed;
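/* Save the current settings as a fallback, request a CHANGE_PARAM reset and
 * wait for it to finish; if that reset fails, restore the fallback values
 * and trigger a second CHANGE_PARAM reset with them.
 */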
3058 static int wait_for_reset(struct ibmvnic_adapter *adapter)
3062 adapter->fallback.mtu = adapter->req_mtu;
3063 adapter->fallback.rx_queues = adapter->req_rx_queues;
3064 adapter->fallback.tx_queues = adapter->req_tx_queues;
3065 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3066 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3068 reinit_completion(&adapter->reset_done);
3069 adapter->wait_for_reset = true;
3070 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3076 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3083 if (adapter->reset_done_rc) {
3085 adapter->desired.mtu = adapter->fallback.mtu;
3086 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3087 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3088 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3089 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3091 reinit_completion(&adapter->reset_done);
3092 adapter->wait_for_reset = true;
3093 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3098 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3106 adapter->wait_for_reset = false;
3111 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3113 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3115 adapter->desired.mtu = new_mtu + ETH_HLEN;
3117 return wait_for_reset(adapter);
3120 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3121 struct net_device *dev,
3122 netdev_features_t features)
3124 /* Some backing hardware adapters cannot
3125 * handle packets with an MSS less than 224
3126 * or with only one segment.
3128 if (skb_is_gso(skb)) {
3129 if (skb_shinfo(skb)->gso_size < 224 ||
3130 skb_shinfo(skb)->gso_segs == 1)
3131 features &= ~NETIF_F_GSO_MASK;
3137 static const struct net_device_ops ibmvnic_netdev_ops = {
3138 .ndo_open = ibmvnic_open,
3139 .ndo_stop = ibmvnic_close,
3140 .ndo_start_xmit = ibmvnic_xmit,
3141 .ndo_set_rx_mode = ibmvnic_set_multi,
3142 .ndo_set_mac_address = ibmvnic_set_mac,
3143 .ndo_validate_addr = eth_validate_addr,
3144 .ndo_tx_timeout = ibmvnic_tx_timeout,
3145 .ndo_change_mtu = ibmvnic_change_mtu,
3146 .ndo_features_check = ibmvnic_features_check,
3149 /* ethtool functions */
3151 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3152 struct ethtool_link_ksettings *cmd)
3154 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3157 rc = send_query_phys_parms(adapter);
3159 adapter->speed = SPEED_UNKNOWN;
3160 adapter->duplex = DUPLEX_UNKNOWN;
3162 cmd->base.speed = adapter->speed;
3163 cmd->base.duplex = adapter->duplex;
3164 cmd->base.port = PORT_FIBRE;
3165 cmd->base.phy_address = 0;
3166 cmd->base.autoneg = AUTONEG_ENABLE;
3171 static void ibmvnic_get_drvinfo(struct net_device *netdev,
3172 struct ethtool_drvinfo *info)
3174 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3176 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3177 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3178 strscpy(info->fw_version, adapter->fw_version,
3179 sizeof(info->fw_version));
3182 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3184 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3186 return adapter->msg_enable;
3189 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3191 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3193 adapter->msg_enable = data;
3196 static u32 ibmvnic_get_link(struct net_device *netdev)
3198 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3200 /* Don't need to send a query because we request a logical link up at
3201 * init and then we wait for link state indications
3203 return adapter->logical_link_state;
3206 static void ibmvnic_get_ringparam(struct net_device *netdev,
3207 struct ethtool_ringparam *ring,
3208 struct kernel_ethtool_ringparam *kernel_ring,
3209 struct netlink_ext_ack *extack)
3211 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3213 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3214 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3215 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3217 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3218 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3220 ring->rx_mini_max_pending = 0;
3221 ring->rx_jumbo_max_pending = 0;
3222 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3223 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3224 ring->rx_mini_pending = 0;
3225 ring->rx_jumbo_pending = 0;
3228 static int ibmvnic_set_ringparam(struct net_device *netdev,
3229 struct ethtool_ringparam *ring,
3230 struct kernel_ethtool_ringparam *kernel_ring,
3231 struct netlink_ext_ack *extack)
3233 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3237 adapter->desired.rx_entries = ring->rx_pending;
3238 adapter->desired.tx_entries = ring->tx_pending;
3240 ret = wait_for_reset(adapter);
3243 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
3244 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
3246 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3247 ring->rx_pending, ring->tx_pending,
3248 adapter->req_rx_add_entries_per_subcrq,
3249 adapter->req_tx_entries_per_subcrq);
3253 static void ibmvnic_get_channels(struct net_device *netdev,
3254 struct ethtool_channels *channels)
3256 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3258 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3259 channels->max_rx = adapter->max_rx_queues;
3260 channels->max_tx = adapter->max_tx_queues;
3262 channels->max_rx = IBMVNIC_MAX_QUEUES;
3263 channels->max_tx = IBMVNIC_MAX_QUEUES;
3266 channels->max_other = 0;
3267 channels->max_combined = 0;
3268 channels->rx_count = adapter->req_rx_queues;
3269 channels->tx_count = adapter->req_tx_queues;
3270 channels->other_count = 0;
3271 channels->combined_count = 0;
3274 static int ibmvnic_set_channels(struct net_device *netdev,
3275 struct ethtool_channels *channels)
3277 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3281 adapter->desired.rx_queues = channels->rx_count;
3282 adapter->desired.tx_queues = channels->tx_count;
3284 ret = wait_for_reset(adapter);
3287 (adapter->req_rx_queues != channels->rx_count ||
3288 adapter->req_tx_queues != channels->tx_count))
3290 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3291 channels->rx_count, channels->tx_count,
3292 adapter->req_rx_queues, adapter->req_tx_queues);
3296 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3298 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3301 switch (stringset) {
3303 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3304 i++, data += ETH_GSTRING_LEN)
3305 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3307 for (i = 0; i < adapter->req_tx_queues; i++) {
3308 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3309 data += ETH_GSTRING_LEN;
3311 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3312 data += ETH_GSTRING_LEN;
3314 snprintf(data, ETH_GSTRING_LEN,
3315 "tx%d_dropped_packets", i);
3316 data += ETH_GSTRING_LEN;
3319 for (i = 0; i < adapter->req_rx_queues; i++) {
3320 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3321 data += ETH_GSTRING_LEN;
3323 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3324 data += ETH_GSTRING_LEN;
3326 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3327 data += ETH_GSTRING_LEN;
3331 case ETH_SS_PRIV_FLAGS:
3332 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3333 strcpy(data + i * ETH_GSTRING_LEN,
3334 ibmvnic_priv_flags[i]);
3341 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3343 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3347 return ARRAY_SIZE(ibmvnic_stats) +
3348 adapter->req_tx_queues * NUM_TX_STATS +
3349 adapter->req_rx_queues * NUM_RX_STATS;
3350 case ETH_SS_PRIV_FLAGS:
3351 return ARRAY_SIZE(ibmvnic_priv_flags);
3357 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3358 struct ethtool_stats *stats, u64 *data)
3360 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3361 union ibmvnic_crq crq;
3365 memset(&crq, 0, sizeof(crq));
3366 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3367 crq.request_statistics.cmd = REQUEST_STATISTICS;
3368 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3369 crq.request_statistics.len =
3370 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3372 /* Wait for data to be written */
3373 reinit_completion(&adapter->stats_done);
3374 rc = ibmvnic_send_crq(adapter, &crq);
3377 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3381 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3382 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3383 (adapter, ibmvnic_stats[i].offset));
3385 for (j = 0; j < adapter->req_tx_queues; j++) {
3386 data[i] = adapter->tx_stats_buffers[j].packets;
3388 data[i] = adapter->tx_stats_buffers[j].bytes;
3390 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3394 for (j = 0; j < adapter->req_rx_queues; j++) {
3395 data[i] = adapter->rx_stats_buffers[j].packets;
3397 data[i] = adapter->rx_stats_buffers[j].bytes;
3399 data[i] = adapter->rx_stats_buffers[j].interrupts;
3404 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3406 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3408 return adapter->priv_flags;
3411 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3413 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3414 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3417 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3419 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3424 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3425 .get_drvinfo = ibmvnic_get_drvinfo,
3426 .get_msglevel = ibmvnic_get_msglevel,
3427 .set_msglevel = ibmvnic_set_msglevel,
3428 .get_link = ibmvnic_get_link,
3429 .get_ringparam = ibmvnic_get_ringparam,
3430 .set_ringparam = ibmvnic_set_ringparam,
3431 .get_channels = ibmvnic_get_channels,
3432 .set_channels = ibmvnic_set_channels,
3433 .get_strings = ibmvnic_get_strings,
3434 .get_sset_count = ibmvnic_get_sset_count,
3435 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3436 .get_link_ksettings = ibmvnic_get_link_ksettings,
3437 .get_priv_flags = ibmvnic_get_priv_flags,
3438 .set_priv_flags = ibmvnic_set_priv_flags,
3441 /* Routines for managing CRQs/sCRQs */
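/* Re-register a single sub-CRQ with firmware after a reset: release its IRQ
 * mapping, clear the queue pages and counters, then call h_reg_sub_crq again.
 */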
3443 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3444 struct ibmvnic_sub_crq_queue *scrq)
3449 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3454 free_irq(scrq->irq, scrq);
3455 irq_dispose_mapping(scrq->irq);
3460 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3461 atomic_set(&scrq->used, 0);
3463 scrq->ind_buf.index = 0;
3465 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3469 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3470 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3474 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3478 if (!adapter->tx_scrq || !adapter->rx_scrq)
3481 for (i = 0; i < adapter->req_tx_queues; i++) {
3482 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3483 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3488 for (i = 0; i < adapter->req_rx_queues; i++) {
3489 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3490 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3498 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3499 struct ibmvnic_sub_crq_queue *scrq,
3502 struct device *dev = &adapter->vdev->dev;
3505 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3508 /* Close the sub-crqs */
3510 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3511 adapter->vdev->unit_address,
3513 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3516 netdev_err(adapter->netdev,
3517 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3522 dma_free_coherent(dev,
3524 scrq->ind_buf.indir_arr,
3525 scrq->ind_buf.indir_dma);
3527 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3529 free_pages((unsigned long)scrq->msgs, 2);
3533 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3536 struct device *dev = &adapter->vdev->dev;
3537 struct ibmvnic_sub_crq_queue *scrq;
3540 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3545 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3547 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3548 goto zero_page_failed;
3551 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3553 if (dma_mapping_error(dev, scrq->msg_token)) {
3554 dev_warn(dev, "Couldn't map crq queue messages page\n");
3558 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3559 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3561 if (rc == H_RESOURCE)
3562 rc = ibmvnic_reset_crq(adapter);
3564 if (rc == H_CLOSED) {
3565 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3567 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3571 scrq->adapter = adapter;
3572 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3573 scrq->ind_buf.index = 0;
3575 scrq->ind_buf.indir_arr =
3576 dma_alloc_coherent(dev,
3578 &scrq->ind_buf.indir_dma,
3581 if (!scrq->ind_buf.indir_arr)
3584 spin_lock_init(&scrq->lock);
3586 netdev_dbg(adapter->netdev,
3587 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3588 scrq->crq_num, scrq->hw_irq, scrq->irq);
3594 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3595 adapter->vdev->unit_address,
3597 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3599 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3602 free_pages((unsigned long)scrq->msgs, 2);
3609 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3613 if (adapter->tx_scrq) {
3614 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3615 if (!adapter->tx_scrq[i])
3618 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3620 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3621 if (adapter->tx_scrq[i]->irq) {
3622 free_irq(adapter->tx_scrq[i]->irq,
3623 adapter->tx_scrq[i]);
3624 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3625 adapter->tx_scrq[i]->irq = 0;
3628 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3632 kfree(adapter->tx_scrq);
3633 adapter->tx_scrq = NULL;
3634 adapter->num_active_tx_scrqs = 0;
3637 if (adapter->rx_scrq) {
3638 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3639 if (!adapter->rx_scrq[i])
3642 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3644 if (adapter->rx_scrq[i]->irq) {
3645 free_irq(adapter->rx_scrq[i]->irq,
3646 adapter->rx_scrq[i]);
3647 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3648 adapter->rx_scrq[i]->irq = 0;
3651 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3655 kfree(adapter->rx_scrq);
3656 adapter->rx_scrq = NULL;
3657 adapter->num_active_rx_scrqs = 0;
3661 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3662 struct ibmvnic_sub_crq_queue *scrq)
3664 struct device *dev = &adapter->vdev->dev;
3667 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3668 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3670 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3675 /* We can not use the IRQ chip EOI handler because that has the
3676 * unintended effect of changing the interrupt priority.
3678 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
3680 u64 val = 0xff000000 | scrq->hw_irq;
3683 rc = plpar_hcall_norets(H_EOI, val);
3685 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
3688 /* Due to a firmware bug, the hypervisor can send an interrupt to a
3689 * transmit or receive queue just prior to a partition migration.
3690 * Force an EOI after migration.
3692 static void ibmvnic_clear_pending_interrupt(struct device *dev,
3693 struct ibmvnic_sub_crq_queue *scrq)
3695 if (!xive_enabled())
3696 ibmvnic_xics_eoi(dev, scrq);
3699 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3700 struct ibmvnic_sub_crq_queue *scrq)
3702 struct device *dev = &adapter->vdev->dev;
3705 if (scrq->hw_irq > 0x100000000ULL) {
3706 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3710 if (test_bit(0, &adapter->resetting) &&
3711 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3712 ibmvnic_clear_pending_interrupt(dev, scrq);
3715 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3716 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3718 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
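/* Handle tx completions for one sub-CRQ: return finished buffers to their
 * pool, report the completed work to BQL, and wake the subqueue once usage
 * drops to half of the requested ring size or below.
 */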
3723 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3724 struct ibmvnic_sub_crq_queue *scrq)
3726 struct device *dev = &adapter->vdev->dev;
3727 struct ibmvnic_tx_pool *tx_pool;
3728 struct ibmvnic_tx_buff *txbuff;
3729 struct netdev_queue *txq;
3730 union sub_crq *next;
3735 while (pending_scrq(adapter, scrq)) {
3736 unsigned int pool = scrq->pool_index;
3737 int num_entries = 0;
3738 int total_bytes = 0;
3739 int num_packets = 0;
3741 next = ibmvnic_next_scrq(adapter, scrq);
3742 for (i = 0; i < next->tx_comp.num_comps; i++) {
3743 index = be32_to_cpu(next->tx_comp.correlators[i]);
3744 if (index & IBMVNIC_TSO_POOL_MASK) {
3745 tx_pool = &adapter->tso_pool[pool];
3746 index &= ~IBMVNIC_TSO_POOL_MASK;
3748 tx_pool = &adapter->tx_pool[pool];
3751 txbuff = &tx_pool->tx_buff[index];
3753 num_entries += txbuff->num_entries;
3755 total_bytes += txbuff->skb->len;
3756 if (next->tx_comp.rcs[i]) {
3757 dev_err(dev, "tx error %x\n",
3758 next->tx_comp.rcs[i]);
3759 dev_kfree_skb_irq(txbuff->skb);
3761 dev_consume_skb_irq(txbuff->skb);
3765 netdev_warn(adapter->netdev,
3766 "TX completion received with NULL socket buffer\n");
3768 tx_pool->free_map[tx_pool->producer_index] = index;
3769 tx_pool->producer_index =
3770 (tx_pool->producer_index + 1) %
3771 tx_pool->num_buffers;
3773 /* remove tx_comp scrq*/
3774 next->tx_comp.first = 0;
3776 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3777 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3779 if (atomic_sub_return(num_entries, &scrq->used) <=
3780 (adapter->req_tx_entries_per_subcrq / 2) &&
3781 __netif_subqueue_stopped(adapter->netdev,
3782 scrq->pool_index)) {
3784 if (adapter->tx_queues_active) {
3785 netif_wake_subqueue(adapter->netdev,
3787 netdev_dbg(adapter->netdev,
3788 "Started queue %d\n",
3795 enable_scrq_irq(adapter, scrq);
3797 if (pending_scrq(adapter, scrq)) {
3798 disable_scrq_irq(adapter, scrq);
3805 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3807 struct ibmvnic_sub_crq_queue *scrq = instance;
3808 struct ibmvnic_adapter *adapter = scrq->adapter;
3810 disable_scrq_irq(adapter, scrq);
3811 ibmvnic_complete_tx(adapter, scrq);
3816 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3818 struct ibmvnic_sub_crq_queue *scrq = instance;
3819 struct ibmvnic_adapter *adapter = scrq->adapter;
3821 /* When booting a kdump kernel we can hit pending interrupts
3822 * prior to completing driver initialization.
3824 if (unlikely(adapter->state != VNIC_OPEN))
3827 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3829 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3830 disable_scrq_irq(adapter, scrq);
3831 __napi_schedule(&adapter->napi[scrq->scrq_num]);
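/* Create an IRQ mapping and register a handler for every tx and rx sub-CRQ;
 * on failure, unwind any mappings made so far and release the sub-CRQs.
 */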
3837 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3839 struct device *dev = &adapter->vdev->dev;
3840 struct ibmvnic_sub_crq_queue *scrq;
3844 for (i = 0; i < adapter->req_tx_queues; i++) {
3845 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3847 scrq = adapter->tx_scrq[i];
3848 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3852 dev_err(dev, "Error mapping irq\n");
3853 goto req_tx_irq_failed;
3856 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3857 adapter->vdev->unit_address, i);
3858 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3859 0, scrq->name, scrq);
3862 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3864 irq_dispose_mapping(scrq->irq);
3865 goto req_tx_irq_failed;
3869 for (i = 0; i < adapter->req_rx_queues; i++) {
3870 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3872 scrq = adapter->rx_scrq[i];
3873 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3876 dev_err(dev, "Error mapping irq\n");
3877 goto req_rx_irq_failed;
3879 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3880 adapter->vdev->unit_address, i);
3881 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3882 0, scrq->name, scrq);
3884 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3886 irq_dispose_mapping(scrq->irq);
3887 goto req_rx_irq_failed;
3893 for (j = 0; j < i; j++) {
3894 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3895 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3897 i = adapter->req_tx_queues;
3899 for (j = 0; j < i; j++) {
3900 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3901 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3903 release_sub_crqs(adapter, 1);
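/* Allocate one sub-CRQ per requested tx and rx queue. If fewer queues could
 * be registered than the adapter minimums, fail; otherwise scale back the
 * requested counts to fit and split the registered queues between tx and rx.
 */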
3907 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3909 struct device *dev = &adapter->vdev->dev;
3910 struct ibmvnic_sub_crq_queue **allqueues;
3911 int registered_queues = 0;
3916 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3918 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3922 for (i = 0; i < total_queues; i++) {
3923 allqueues[i] = init_sub_crq_queue(adapter);
3924 if (!allqueues[i]) {
3925 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3928 registered_queues++;
3931 /* Make sure we were able to register the minimum number of queues */
3932 if (registered_queues <
3933 adapter->min_tx_queues + adapter->min_rx_queues) {
3934 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3938 /* Distribute the failed allocated queues */
3939 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3940 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3943 if (adapter->req_rx_queues > adapter->min_rx_queues)
3944 adapter->req_rx_queues--;
3949 if (adapter->req_tx_queues > adapter->min_tx_queues)
3950 adapter->req_tx_queues--;
3957 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3958 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3959 if (!adapter->tx_scrq)
3962 for (i = 0; i < adapter->req_tx_queues; i++) {
3963 adapter->tx_scrq[i] = allqueues[i];
3964 adapter->tx_scrq[i]->pool_index = i;
3965 adapter->num_active_tx_scrqs++;
3968 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3969 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3970 if (!adapter->rx_scrq)
3973 for (i = 0; i < adapter->req_rx_queues; i++) {
3974 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3975 adapter->rx_scrq[i]->scrq_num = i;
3976 adapter->num_active_rx_scrqs++;
3983 kfree(adapter->tx_scrq);
3984 adapter->tx_scrq = NULL;
3986 for (i = 0; i < registered_queues; i++)
3987 release_sub_crq_queue(adapter, allqueues[i], 1);
3992 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3994 struct device *dev = &adapter->vdev->dev;
3995 union ibmvnic_crq crq;
3999 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4000 * the PROMISC flag). Initialize this count upfront. When the tasklet
4001 * receives a response to all of these, it will send the next protocol
4002 * message (QUERY_IP_OFFLOAD).
4004 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4005 adapter->promisc_supported)
4011 /* Sub-CRQ entries are 32 bytes long */
4012 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4014 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4016 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4017 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4018 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4022 if (adapter->desired.mtu)
4023 adapter->req_mtu = adapter->desired.mtu;
4025 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4027 if (!adapter->desired.tx_entries)
4028 adapter->desired.tx_entries =
4029 adapter->max_tx_entries_per_subcrq;
4030 if (!adapter->desired.rx_entries)
4031 adapter->desired.rx_entries =
4032 adapter->max_rx_add_entries_per_subcrq;
4034 max_entries = IBMVNIC_MAX_LTB_SIZE /
4035 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4037 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4038 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
4039 adapter->desired.tx_entries = max_entries;
4042 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4043 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
4044 adapter->desired.rx_entries = max_entries;
4047 if (adapter->desired.tx_entries)
4048 adapter->req_tx_entries_per_subcrq =
4049 adapter->desired.tx_entries;
4051 adapter->req_tx_entries_per_subcrq =
4052 adapter->max_tx_entries_per_subcrq;
4054 if (adapter->desired.rx_entries)
4055 adapter->req_rx_add_entries_per_subcrq =
4056 adapter->desired.rx_entries;
4058 adapter->req_rx_add_entries_per_subcrq =
4059 adapter->max_rx_add_entries_per_subcrq;
4061 if (adapter->desired.tx_queues)
4062 adapter->req_tx_queues =
4063 adapter->desired.tx_queues;
4065 adapter->req_tx_queues =
4066 adapter->opt_tx_comp_sub_queues;
4068 if (adapter->desired.rx_queues)
4069 adapter->req_rx_queues =
4070 adapter->desired.rx_queues;
4072 adapter->req_rx_queues =
4073 adapter->opt_rx_comp_queues;
4075 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4077 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4079 memset(&crq, 0, sizeof(crq));
4080 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4081 crq.request_capability.cmd = REQUEST_CAPABILITY;
4083 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4084 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4086 ibmvnic_send_crq(adapter, &crq);
4088 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4089 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4091 ibmvnic_send_crq(adapter, &crq);
4093 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4094 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4096 ibmvnic_send_crq(adapter, &crq);
4098 crq.request_capability.capability =
4099 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4100 crq.request_capability.number =
4101 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4103 ibmvnic_send_crq(adapter, &crq);
4105 crq.request_capability.capability =
4106 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4107 crq.request_capability.number =
4108 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4110 ibmvnic_send_crq(adapter, &crq);
4112 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4113 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4115 ibmvnic_send_crq(adapter, &crq);
4117 if (adapter->netdev->flags & IFF_PROMISC) {
4118 if (adapter->promisc_supported) {
4119 crq.request_capability.capability =
4120 cpu_to_be16(PROMISC_REQUESTED);
4121 crq.request_capability.number = cpu_to_be64(1);
4123 ibmvnic_send_crq(adapter, &crq);
4126 crq.request_capability.capability =
4127 cpu_to_be16(PROMISC_REQUESTED);
4128 crq.request_capability.number = cpu_to_be64(0);
4130 ibmvnic_send_crq(adapter, &crq);
4133 /* Keep at end to catch any discrepancy between expected and actual CRQs sent. */
4136 WARN_ON(cap_reqs != 0);
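/* Report whether the descriptor at the sub-CRQ's current index has been
 * marked valid by firmware (IBMVNIC_CRQ_CMD_RSP set in the first byte).
 */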
4139 static int pending_scrq(struct ibmvnic_adapter *adapter,
4140 struct ibmvnic_sub_crq_queue *scrq)
4142 union sub_crq *entry = &scrq->msgs[scrq->cur];
4145 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4147 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4148 * contents of the SCRQ descriptor
4155 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4156 struct ibmvnic_sub_crq_queue *scrq)
4158 union sub_crq *entry;
4159 unsigned long flags;
4161 spin_lock_irqsave(&scrq->lock, flags);
4162 entry = &scrq->msgs[scrq->cur];
4163 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4164 if (++scrq->cur == scrq->size)
4169 spin_unlock_irqrestore(&scrq->lock, flags);
4171 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4172 * contents of the SCRQ descriptor
4179 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4181 struct ibmvnic_crq_queue *queue = &adapter->crq;
4182 union ibmvnic_crq *crq;
4184 crq = &queue->msgs[queue->cur];
4185 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4186 if (++queue->cur == queue->size)
4195 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4199 dev_warn_ratelimited(dev,
4200 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4204 dev_warn_ratelimited(dev,
4205 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4209 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
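/* Hand a block of @num_entries indirect descriptors at @ioba to the
 * hypervisor with H_SEND_SUB_CRQ_INDIRECT, logging any failure.
 */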
4214 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4215 u64 remote_handle, u64 ioba, u64 num_entries)
4217 unsigned int ua = adapter->vdev->unit_address;
4218 struct device *dev = &adapter->vdev->dev;
4221 /* Make sure the hypervisor sees the complete request */
4223 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4224 cpu_to_be64(remote_handle),
4228 print_subcrq_error(dev, rc, __func__);
4233 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4234 union ibmvnic_crq *crq)
4236 unsigned int ua = adapter->vdev->unit_address;
4237 struct device *dev = &adapter->vdev->dev;
4238 u64 *u64_crq = (u64 *)crq;
4241 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4242 (unsigned long)cpu_to_be64(u64_crq[0]),
4243 (unsigned long)cpu_to_be64(u64_crq[1]));
4245 if (!adapter->crq.active &&
4246 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4247 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4251 /* Make sure the hypervisor sees the complete request */
4254 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4255 cpu_to_be64(u64_crq[0]),
4256 cpu_to_be64(u64_crq[1]));
4259 if (rc == H_CLOSED) {
4260 dev_warn(dev, "CRQ Queue closed\n");
4261 /* do not reset, report the fail, wait for passive init from server */
4264 dev_warn(dev, "Send error (rc=%d)\n", rc);
4270 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4272 struct device *dev = &adapter->vdev->dev;
4273 union ibmvnic_crq crq;
4277 memset(&crq, 0, sizeof(crq));
4278 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4279 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4280 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4283 rc = ibmvnic_send_crq(adapter, &crq);
4289 } while (retries > 0);
4292 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4299 struct vnic_login_client_data {
4305 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4309 /* Calculate the amount of buffer space needed for the
4310 * vnic client data in the login buffer. There are four entries,
4311 * OS name, LPAR name, device name, and a null last entry.
4313 len = 4 * sizeof(struct vnic_login_client_data);
4314 len += 6; /* "Linux" plus NULL */
4315 len += strlen(utsname()->nodename) + 1;
4316 len += strlen(adapter->netdev->name) + 1;
4321 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4322 struct vnic_login_client_data *vlcd)
4324 const char *os_name = "Linux";
4327 /* Type 1 - LPAR OS */
4329 len = strlen(os_name) + 1;
4330 vlcd->len = cpu_to_be16(len);
4331 strscpy(vlcd->name, os_name, len);
4332 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4334 /* Type 2 - LPAR name */
4336 len = strlen(utsname()->nodename) + 1;
4337 vlcd->len = cpu_to_be16(len);
4338 strscpy(vlcd->name, utsname()->nodename, len);
4339 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4341 /* Type 3 - device name */
4343 len = strlen(adapter->netdev->name) + 1;
4344 vlcd->len = cpu_to_be16(len);
4345 strscpy(vlcd->name, adapter->netdev->name, len);
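/* Allocate and DMA-map the login and login-response buffers, fill in the
 * tx/rx sub-CRQ handles and the client data, then send the LOGIN CRQ to the
 * VNIC server.
 */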
4348 static int send_login(struct ibmvnic_adapter *adapter)
4350 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4351 struct ibmvnic_login_buffer *login_buffer;
4352 struct device *dev = &adapter->vdev->dev;
4353 struct vnic_login_client_data *vlcd;
4354 dma_addr_t rsp_buffer_token;
4355 dma_addr_t buffer_token;
4356 size_t rsp_buffer_size;
4357 union ibmvnic_crq crq;
4358 int client_data_len;
4365 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4366 netdev_err(adapter->netdev,
4367 "RX or TX queues are not allocated, device login failed\n");
4371 release_login_buffer(adapter);
4372 release_login_rsp_buffer(adapter);
4374 client_data_len = vnic_client_data_len(adapter);
4377 sizeof(struct ibmvnic_login_buffer) +
4378 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4381 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4383 goto buf_alloc_failed;
4385 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4387 if (dma_mapping_error(dev, buffer_token)) {
4388 dev_err(dev, "Couldn't map login buffer\n");
4389 goto buf_map_failed;
4392 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4393 sizeof(u64) * adapter->req_tx_queues +
4394 sizeof(u64) * adapter->req_rx_queues +
4395 sizeof(u64) * adapter->req_rx_queues +
4396 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4398 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4399 if (!login_rsp_buffer)
4400 goto buf_rsp_alloc_failed;
4402 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4403 rsp_buffer_size, DMA_FROM_DEVICE);
4404 if (dma_mapping_error(dev, rsp_buffer_token)) {
4405 dev_err(dev, "Couldn't map login rsp buffer\n");
4406 goto buf_rsp_map_failed;
4409 adapter->login_buf = login_buffer;
4410 adapter->login_buf_token = buffer_token;
4411 adapter->login_buf_sz = buffer_size;
4412 adapter->login_rsp_buf = login_rsp_buffer;
4413 adapter->login_rsp_buf_token = rsp_buffer_token;
4414 adapter->login_rsp_buf_sz = rsp_buffer_size;
4416 login_buffer->len = cpu_to_be32(buffer_size);
4417 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4418 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4419 login_buffer->off_txcomp_subcrqs =
4420 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4421 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4422 login_buffer->off_rxcomp_subcrqs =
4423 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4424 sizeof(u64) * adapter->req_tx_queues);
4425 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4426 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4428 tx_list_p = (__be64 *)((char *)login_buffer +
4429 sizeof(struct ibmvnic_login_buffer));
4430 rx_list_p = (__be64 *)((char *)login_buffer +
4431 sizeof(struct ibmvnic_login_buffer) +
4432 sizeof(u64) * adapter->req_tx_queues);
4434 for (i = 0; i < adapter->req_tx_queues; i++) {
4435 if (adapter->tx_scrq[i]) {
4437 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4441 for (i = 0; i < adapter->req_rx_queues; i++) {
4442 if (adapter->rx_scrq[i]) {
4444 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4448 /* Insert vNIC login client data */
4449 vlcd = (struct vnic_login_client_data *)
4450 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4451 login_buffer->client_data_offset =
4452 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4453 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4455 vnic_add_client_data(adapter, vlcd);
4457 netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(adapter->login_buf))[i]);
	}
4463 memset(&crq, 0, sizeof(crq));
4464 crq.login.first = IBMVNIC_CRQ_CMD;
4465 crq.login.cmd = LOGIN;
4466 crq.login.ioba = cpu_to_be32(buffer_token);
4467 crq.login.len = cpu_to_be32(buffer_size);
4469 adapter->login_pending = true;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		adapter->login_pending = false;
		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
		goto buf_rsp_map_failed;
	}

	return 0;
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
	adapter->login_rsp_buf = NULL;
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
	adapter->login_buf = NULL;
buf_alloc_failed:
	return -ENOMEM;
}
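
/* Thin wrappers around the map-related CRQ commands used to register and
 * unregister long term mapped buffers with the VNIC server, and to query
 * how much mapping space the server still has available.
 */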
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}
static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}
static void send_query_map(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int cap_reqs;

	/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
	 * upfront. When the tasklet receives a response to all of these, it
	 * can send out the next protocol message (REQUEST_CAPABILITY).
	 */
	cap_reqs = 25;
	atomic_set(&adapter->running_cap_crqs, cap_reqs);
4540 memset(&crq, 0, sizeof(crq));
4541 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4542 crq.query_capability.cmd = QUERY_CAPABILITY;
4544 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4545 ibmvnic_send_crq(adapter, &crq);
4548 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4549 ibmvnic_send_crq(adapter, &crq);
4552 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4553 ibmvnic_send_crq(adapter, &crq);
4556 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4557 ibmvnic_send_crq(adapter, &crq);
4560 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4561 ibmvnic_send_crq(adapter, &crq);
4564 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4565 ibmvnic_send_crq(adapter, &crq);
4568 crq.query_capability.capability =
4569 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4570 ibmvnic_send_crq(adapter, &crq);
4573 crq.query_capability.capability =
4574 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4575 ibmvnic_send_crq(adapter, &crq);
4578 crq.query_capability.capability =
4579 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4580 ibmvnic_send_crq(adapter, &crq);
4583 crq.query_capability.capability =
4584 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4585 ibmvnic_send_crq(adapter, &crq);
4588 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4589 ibmvnic_send_crq(adapter, &crq);
4592 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4593 ibmvnic_send_crq(adapter, &crq);
4596 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4597 ibmvnic_send_crq(adapter, &crq);
4600 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4601 ibmvnic_send_crq(adapter, &crq);
4604 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4605 ibmvnic_send_crq(adapter, &crq);
4608 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4609 ibmvnic_send_crq(adapter, &crq);
4612 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4613 ibmvnic_send_crq(adapter, &crq);
4616 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4617 ibmvnic_send_crq(adapter, &crq);
4620 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4621 ibmvnic_send_crq(adapter, &crq);
4624 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4625 ibmvnic_send_crq(adapter, &crq);
4628 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4629 ibmvnic_send_crq(adapter, &crq);
4632 crq.query_capability.capability =
4633 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4634 ibmvnic_send_crq(adapter, &crq);
4637 crq.query_capability.capability =
4638 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4639 ibmvnic_send_crq(adapter, &crq);
4642 crq.query_capability.capability =
4643 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4644 ibmvnic_send_crq(adapter, &crq);
4647 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4649 ibmvnic_send_crq(adapter, &crq);
	/* Keep at end to catch any discrepancy between expected and actual
	 * CRQs sent.
	 */
	WARN_ON(cap_reqs != 0);
}
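
/* Ask the server which checksum and large-send offloads it supports.  The
 * answer is DMA'd into adapter->ip_offload_buf and consumed by
 * handle_query_ip_offload_rsp()/send_control_ip_offload() below.
 */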
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	adapter->ip_offload_tok =
		dma_map_single(dev,
			       &adapter->ip_offload_buf,
			       buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba =
	    cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}
static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev,
			       ctrl_buf,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4706 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4707 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4708 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4709 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4710 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4711 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4712 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4713 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4714 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4716 /* large_rx disabled for now, additional features needed */
4717 ctrl_buf->large_rx_ipv4 = 0;
4718 ctrl_buf->large_rx_ipv6 = 0;
4720 if (adapter->state != VNIC_PROBING) {
4721 old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4727 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4728 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4730 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4731 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4733 if ((adapter->netdev->features &
4734 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4735 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4737 if (buf->large_tx_ipv4)
4738 adapter->netdev->hw_features |= NETIF_F_TSO;
4739 if (buf->large_tx_ipv6)
4740 adapter->netdev->hw_features |= NETIF_F_TSO6;
4742 if (adapter->state == VNIC_PROBING) {
4743 adapter->netdev->features |= adapter->netdev->hw_features;
4744 } else if (old_hw_features != adapter->netdev->hw_features) {
4745 netdev_features_t tmp = 0;
4747 /* disable features no longer supported */
4748 adapter->netdev->features &= adapter->netdev->hw_features;
4749 /* turn on features now supported if previously enabled */
4750 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4751 adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
4757 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4758 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4759 crq.control_ip_offload.len =
4760 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4761 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
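
/* VPD (Vital Product Data) handlers: the size response tells us how big a
 * buffer to fetch, and the VPD response itself is scanned for the firmware
 * level string that is reported via ethtool.
 */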
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}
static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
	complete(&adapter->fw_done);
}
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4839 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4840 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4841 netdev_dbg(adapter->netdev, "%016lx\n",
4842 ((unsigned long *)(buf))[i]);
4844 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4845 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4846 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4847 buf->tcp_ipv4_chksum);
4848 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4849 buf->tcp_ipv6_chksum);
4850 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4851 buf->udp_ipv4_chksum);
4852 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4853 buf->udp_ipv6_chksum);
4854 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4855 buf->large_tx_ipv4);
4856 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4857 buf->large_tx_ipv6);
4858 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4859 buf->large_rx_ipv4);
4860 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4861 buf->large_rx_ipv6);
4862 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4863 buf->max_ipv4_header_size);
4864 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4865 buf->max_ipv6_header_size);
4866 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4867 buf->max_tcp_header_size);
4868 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4869 buf->max_udp_header_size);
4870 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4871 buf->max_large_tx_size);
4872 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4873 buf->max_large_rx_size);
4874 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4875 buf->ipv6_extension_header);
4876 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4877 buf->tcp_pseudosum_req);
4878 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4879 buf->num_ipv6_ext_headers);
4880 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4881 buf->off_ipv6_ext_headers);
	send_control_ip_offload(adapter);
}
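
/* Translate the cause field of an ERROR_INDICATION CRQ into a
 * human-readable string for the recovery warning below.
 */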
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low Memory";
	default:
		return "unknown";
	}
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}
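
/* A REQUEST_CAPABILITY response either confirms the requested value or
 * returns PARTIALSUCCESS with what the server can actually grant, in which
 * case the request is resent with the granted value (or the fallback MTU).
 */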
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}
	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);
		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}
		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_query_ip_offload(adapter);
}
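
/* Process the server's LOGIN response: sanity check it against the login
 * request, record the negotiated RX buffer size and copy out the TX/RX
 * sub-CRQ handles before waking up the initialization path.
 */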
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;
5051 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
5053 dma_unmap_single(dev, adapter->login_rsp_buf_token,
5054 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
5056 /* If the number of queues requested can't be allocated by the
5057 * server, the login response will return with code 1. We will need
5058 * to resend the login buffer with fewer queues requested.
5060 if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}
5066 if (adapter->failover_pending) {
5067 adapter->init_done_rc = -EAGAIN;
5068 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
		complete(&adapter->init_done);
		/* login response buffer will be released on reset */
		return 0;
	}
5074 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5076 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5077 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5078 netdev_dbg(adapter->netdev, "%016lx\n",
5079 ((unsigned long *)(adapter->login_rsp_buf))[i]);
5083 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5084 (be32_to_cpu(login->num_rxcomp_subcrqs) *
5085 adapter->req_rx_add_queues !=
5086 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5087 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5088 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5091 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5092 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5098 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5099 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5101 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5102 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5103 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5104 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5106 for (i = 0; i < num_tx_pools; i++)
5107 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5109 for (i = 0; i < num_rx_pools; i++)
5110 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5112 adapter->num_active_tx_scrqs = num_tx_pools;
5113 adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
5121 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5122 struct ibmvnic_adapter *adapter)
5124 struct device *dev = &adapter->vdev->dev;
5127 rc = crq->request_unmap_rsp.rc.code;
5129 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5132 static void handle_query_map_rsp(union ibmvnic_crq *crq,
5133 struct ibmvnic_adapter *adapter)
5135 struct net_device *netdev = adapter->netdev;
5136 struct device *dev = &adapter->vdev->dev;
5139 rc = crq->query_map_rsp.rc.code;
5141 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5144 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5145 crq->query_map_rsp.page_size,
5146 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5147 __be32_to_cpu(crq->query_map_rsp.free_pages));
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
5168 adapter->min_tx_queues =
5169 be64_to_cpu(crq->query_capability.number);
5170 netdev_dbg(netdev, "min_tx_queues = %lld\n",
5171 adapter->min_tx_queues);
5174 adapter->min_rx_queues =
5175 be64_to_cpu(crq->query_capability.number);
5176 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5177 adapter->min_rx_queues);
5179 case MIN_RX_ADD_QUEUES:
5180 adapter->min_rx_add_queues =
5181 be64_to_cpu(crq->query_capability.number);
5182 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5183 adapter->min_rx_add_queues);
5186 adapter->max_tx_queues =
5187 be64_to_cpu(crq->query_capability.number);
5188 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5189 adapter->max_tx_queues);
5192 adapter->max_rx_queues =
5193 be64_to_cpu(crq->query_capability.number);
5194 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5195 adapter->max_rx_queues);
5197 case MAX_RX_ADD_QUEUES:
5198 adapter->max_rx_add_queues =
5199 be64_to_cpu(crq->query_capability.number);
5200 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5201 adapter->max_rx_add_queues);
5203 case MIN_TX_ENTRIES_PER_SUBCRQ:
5204 adapter->min_tx_entries_per_subcrq =
5205 be64_to_cpu(crq->query_capability.number);
5206 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5207 adapter->min_tx_entries_per_subcrq);
5209 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5210 adapter->min_rx_add_entries_per_subcrq =
5211 be64_to_cpu(crq->query_capability.number);
5212 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5213 adapter->min_rx_add_entries_per_subcrq);
5215 case MAX_TX_ENTRIES_PER_SUBCRQ:
5216 adapter->max_tx_entries_per_subcrq =
5217 be64_to_cpu(crq->query_capability.number);
5218 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5219 adapter->max_tx_entries_per_subcrq);
5221 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5222 adapter->max_rx_add_entries_per_subcrq =
5223 be64_to_cpu(crq->query_capability.number);
5224 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
5225 adapter->max_rx_add_entries_per_subcrq);
5227 case TCP_IP_OFFLOAD:
5228 adapter->tcp_ip_offload =
5229 be64_to_cpu(crq->query_capability.number);
5230 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5231 adapter->tcp_ip_offload);
5233 case PROMISC_SUPPORTED:
5234 adapter->promisc_supported =
5235 be64_to_cpu(crq->query_capability.number);
5236 netdev_dbg(netdev, "promisc_supported = %lld\n",
5237 adapter->promisc_supported);
5240 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5241 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5242 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5245 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5246 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5247 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5249 case MAX_MULTICAST_FILTERS:
5250 adapter->max_multicast_filters =
5251 be64_to_cpu(crq->query_capability.number);
5252 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5253 adapter->max_multicast_filters);
5255 case VLAN_HEADER_INSERTION:
5256 adapter->vlan_header_insertion =
5257 be64_to_cpu(crq->query_capability.number);
5258 if (adapter->vlan_header_insertion)
5259 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5260 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5261 adapter->vlan_header_insertion);
5263 case RX_VLAN_HEADER_INSERTION:
5264 adapter->rx_vlan_header_insertion =
5265 be64_to_cpu(crq->query_capability.number);
5266 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5267 adapter->rx_vlan_header_insertion);
5269 case MAX_TX_SG_ENTRIES:
5270 adapter->max_tx_sg_entries =
5271 be64_to_cpu(crq->query_capability.number);
5272 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5273 adapter->max_tx_sg_entries);
5275 case RX_SG_SUPPORTED:
5276 adapter->rx_sg_supported =
5277 be64_to_cpu(crq->query_capability.number);
5278 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5279 adapter->rx_sg_supported);
5281 case OPT_TX_COMP_SUB_QUEUES:
5282 adapter->opt_tx_comp_sub_queues =
5283 be64_to_cpu(crq->query_capability.number);
5284 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5285 adapter->opt_tx_comp_sub_queues);
5287 case OPT_RX_COMP_QUEUES:
5288 adapter->opt_rx_comp_queues =
5289 be64_to_cpu(crq->query_capability.number);
5290 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5291 adapter->opt_rx_comp_queues);
5293 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5294 adapter->opt_rx_bufadd_q_per_rx_comp_q =
5295 be64_to_cpu(crq->query_capability.number);
5296 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5297 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5299 case OPT_TX_ENTRIES_PER_SUBCRQ:
5300 adapter->opt_tx_entries_per_subcrq =
5301 be64_to_cpu(crq->query_capability.number);
5302 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5303 adapter->opt_tx_entries_per_subcrq);
5305 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5306 adapter->opt_rxba_entries_per_subcrq =
5307 be64_to_cpu(crq->query_capability.number);
5308 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5309 adapter->opt_rxba_entries_per_subcrq);
5311 case TX_RX_DESC_REQ:
5312 adapter->tx_rx_desc_req = crq->query_capability.number;
5313 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5314 adapter->tx_rx_desc_req);
5318 netdev_err(netdev, "Got invalid cap rsp %d\n",
5319 crq->query_capability.capability);
5323 if (atomic_read(&adapter->running_cap_crqs) == 0)
5324 send_request_cap(adapter, 0);
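
/* Query the physical port parameters (speed/duplex) so ethtool can report
 * them; the response handler below maps the firmware speed encoding onto
 * the standard SPEED_* and DUPLEX_* constants.
 */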
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
5370 adapter->speed = SPEED_10;
5372 case IBMVNIC_100MBPS:
5373 adapter->speed = SPEED_100;
5376 adapter->speed = SPEED_1000;
5378 case IBMVNIC_10GBPS:
5379 adapter->speed = SPEED_10000;
5381 case IBMVNIC_25GBPS:
5382 adapter->speed = SPEED_25000;
5384 case IBMVNIC_40GBPS:
5385 adapter->speed = SPEED_40000;
5387 case IBMVNIC_50GBPS:
5388 adapter->speed = SPEED_50000;
5390 case IBMVNIC_100GBPS:
5391 adapter->speed = SPEED_100000;
5393 case IBMVNIC_200GBPS:
5394 adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
5424 case IBMVNIC_CRQ_INIT_RSP:
5425 switch (gen_crq->cmd) {
5426 case IBMVNIC_CRQ_INIT:
5427 dev_info(dev, "Partner initialized\n");
5428 adapter->from_passive_init = true;
5429 /* Discard any stale login responses from prev reset.
5430 * CHECK: should we clear even on INIT_COMPLETE?
5432 adapter->login_pending = false;
5434 if (adapter->state == VNIC_DOWN)
5435 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5437 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5439 if (rc && rc != -EBUSY) {
5440 /* We were unable to schedule the failover
5441 * reset either because the adapter was still
5442 * probing (eg: during kexec) or we could not
5443 * allocate memory. Clear the failover_pending
5444 * flag since no one else will. We ignore
5445 * EBUSY because it means either FAILOVER reset
5446 * is already scheduled or the adapter is
5450 "Error %ld scheduling failover reset\n",
5452 adapter->failover_pending = false;
5455 if (!completion_done(&adapter->init_done)) {
5456 if (!adapter->init_done_rc)
5457 adapter->init_done_rc = -EAGAIN;
5458 complete(&adapter->init_done);
5462 case IBMVNIC_CRQ_INIT_COMPLETE:
5463 dev_info(dev, "Partner initialization complete\n");
5464 adapter->crq.active = true;
5465 send_version_xchg(adapter);
5468 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5471 case IBMVNIC_CRQ_XPORT_EVENT:
5472 netif_carrier_off(netdev);
5473 adapter->crq.active = false;
5474 /* terminate any thread waiting for a response
5477 if (!completion_done(&adapter->fw_done)) {
5478 adapter->fw_done_rc = -EIO;
5479 complete(&adapter->fw_done);
5482 /* if we got here during crq-init, retry crq-init */
5483 if (!completion_done(&adapter->init_done)) {
5484 adapter->init_done_rc = -EAGAIN;
5485 complete(&adapter->init_done);
5488 if (!completion_done(&adapter->stats_done))
5489 complete(&adapter->stats_done);
5490 if (test_bit(0, &adapter->resetting))
5491 adapter->force_reset_recovery = true;
5492 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5493 dev_info(dev, "Migrated, re-enabling adapter\n");
5494 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5495 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5496 dev_info(dev, "Backing device failover detected\n");
5497 adapter->failover_pending = true;
5499 /* The adapter lost the connection */
5500 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5502 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5505 case IBMVNIC_CRQ_CMD_RSP:
5508 dev_err(dev, "Got an invalid msg type 0x%02x\n",
5513 switch (gen_crq->cmd) {
5514 case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
5526 case QUERY_CAPABILITY_RSP:
5527 handle_query_cap_rsp(crq, adapter);
5530 handle_query_map_rsp(crq, adapter);
5532 case REQUEST_MAP_RSP:
5533 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5534 complete(&adapter->fw_done);
5536 case REQUEST_UNMAP_RSP:
5537 handle_request_unmap_rsp(crq, adapter);
5539 case REQUEST_CAPABILITY_RSP:
5540 handle_request_cap_rsp(crq, adapter);
5543 netdev_dbg(netdev, "Got Login Response\n");
5544 handle_login_rsp(crq, adapter);
5546 case LOGICAL_LINK_STATE_RSP:
5548 "Got Logical Link State Response, state: %d rc: %d\n",
5549 crq->logical_link_state_rsp.link_state,
5550 crq->logical_link_state_rsp.rc.code);
5551 adapter->logical_link_state =
5552 crq->logical_link_state_rsp.link_state;
5553 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5554 complete(&adapter->init_done);
5556 case LINK_STATE_INDICATION:
5557 netdev_dbg(netdev, "Got Logical Link State Indication\n");
5558 adapter->phys_link_state =
5559 crq->link_state_indication.phys_link_state;
5560 adapter->logical_link_state =
5561 crq->link_state_indication.logical_link_state;
5562 if (adapter->phys_link_state && adapter->logical_link_state)
5563 netif_carrier_on(netdev);
5565 netif_carrier_off(netdev);
5567 case CHANGE_MAC_ADDR_RSP:
5568 netdev_dbg(netdev, "Got MAC address change Response\n");
5569 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
5571 case ERROR_INDICATION:
5572 netdev_dbg(netdev, "Got Error Indication\n");
5573 handle_error_indication(crq, adapter);
5575 case REQUEST_STATISTICS_RSP:
5576 netdev_dbg(netdev, "Got Statistics Response\n");
5577 complete(&adapter->stats_done);
5579 case QUERY_IP_OFFLOAD_RSP:
5580 netdev_dbg(netdev, "Got Query IP offload Response\n");
5581 handle_query_ip_offload_rsp(adapter);
5583 case MULTICAST_CTRL_RSP:
5584 netdev_dbg(netdev, "Got multicast control Response\n");
5586 case CONTROL_IP_OFFLOAD_RSP:
5587 netdev_dbg(netdev, "Got Control IP offload Response\n");
5588 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5589 sizeof(adapter->ip_offload_ctrl),
5591 complete(&adapter->init_done);
5593 case COLLECT_FW_TRACE_RSP:
5594 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5595 complete(&adapter->fw_done);
5597 case GET_VPD_SIZE_RSP:
5598 handle_vpd_size_rsp(crq, adapter);
5601 handle_vpd_rsp(crq, adapter);
5603 case QUERY_PHYS_PARMS_RSP:
5604 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
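
/* The CRQ interrupt only kicks the tasklet; the tasklet drains every valid
 * message off the queue under the CRQ lock and dispatches each one to
 * ibmvnic_handle_crq().
 */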
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Pull all the valid messages off the CRQ */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
		/* This barrier makes sure ibmvnic_next_crq()'s
		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
		 * before ibmvnic_handle_crq()'s
		 * switch(gen_crq->first) and switch(gen_crq->cmd).
		 */
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
		crq->generic.first = 0;
	}

	spin_unlock_irqrestore(&queue->lock, flags);
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EFAULT;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
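
/* Allocate the CRQ page, register it with the hypervisor and hook up the
 * CRQ interrupt and tasklet.  H_RESOURCE from H_REG_CRQ usually means a
 * previous instance (e.g. after kexec) still owns the CRQ, so a reset of
 * the CRQ is attempted first.
 */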
5716 static int init_crq_queue(struct ibmvnic_adapter *adapter)
5718 struct ibmvnic_crq_queue *crq = &adapter->crq;
5719 struct device *dev = &adapter->vdev->dev;
5720 struct vio_dev *vdev = adapter->vdev;
5721 int rc, retrc = -ENOMEM;
5726 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5727 /* Should we allocate more than one page? */
5732 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5733 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5735 if (dma_mapping_error(dev, crq->msg_token))
5738 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5739 crq->msg_token, PAGE_SIZE);
5741 if (rc == H_RESOURCE)
5742 /* maybe kexecing and resource is busy. try a reset */
5743 rc = ibmvnic_reset_crq(adapter);
5746 if (rc == H_CLOSED) {
5747 dev_warn(dev, "Partner adapter not ready\n");
5749 dev_warn(dev, "Error %d opening adapter\n", rc);
5750 goto reg_crq_failed;
5755 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
5757 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
5758 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5759 adapter->vdev->unit_address);
5760 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
5762 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5764 goto req_irq_failed;
5767 rc = vio_enable_interrupts(vdev);
5769 dev_err(dev, "Error %d enabling interrupts\n", rc);
5770 goto req_irq_failed;
5774 spin_lock_init(&crq->lock);
5776 /* process any CRQs that were queued before we enabled interrupts */
5777 tasklet_schedule(&adapter->tasklet);
5782 tasklet_kill(&adapter->tasklet);
5784 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5785 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5787 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5789 free_page((unsigned long)crq->msgs);
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
5857 static struct device_attribute dev_attr_failover;
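
/* Probe: allocate the netdev, set up the reset machinery and completion
 * objects, then bring up the CRQ and run the initial protocol exchange
 * (retrying on -EAGAIN) before registering the device.
 */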
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;
5885 adapter = netdev_priv(netdev);
5886 adapter->state = VNIC_PROBING;
5887 dev_set_drvdata(&dev->dev, netdev);
5888 adapter->vdev = dev;
5889 adapter->netdev = netdev;
5890 adapter->login_pending = false;
5891 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
5892 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
5893 bitmap_set(adapter->map_ids, 0, 1);
5895 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5896 eth_hw_addr_set(netdev, adapter->mac_addr);
5897 netdev->irq = dev->irq;
5898 netdev->netdev_ops = &ibmvnic_netdev_ops;
5899 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5900 SET_NETDEV_DEV(netdev, &dev->dev);
5902 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
5903 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5904 __ibmvnic_delayed_reset);
5905 INIT_LIST_HEAD(&adapter->rwi_list);
5906 spin_lock_init(&adapter->rwi_lock);
5907 spin_lock_init(&adapter->state_lock);
5908 mutex_init(&adapter->fw_lock);
5909 init_completion(&adapter->probe_done);
5910 init_completion(&adapter->init_done);
5911 init_completion(&adapter->fw_done);
5912 init_completion(&adapter->reset_done);
5913 init_completion(&adapter->stats_done);
5914 clear_bit(0, &adapter->resetting);
5915 adapter->prev_rx_buf_sz = 0;
5916 adapter->prev_mtu = 0;
5918 init_success = false;
5920 reinit_init_done(adapter);
5922 /* clear any failovers we got in the previous pass
5923 * since we are reinitializing the CRQ
5925 adapter->failover_pending = false;
5927 /* If we had already initialized CRQ, we may have one or
5928 * more resets queued already. Discard those and release
5929 * the CRQ before initializing the CRQ again.
5931 release_crq_queue(adapter);
5933 /* Since we are still in PROBING state, __ibmvnic_reset()
5934 * will not access the ->rwi_list and since we released CRQ,
5935 * we won't get _new_ transport events. But there maybe an
5936 * ongoing ibmvnic_reset() call. So serialize access to
5937 * rwi_list. If we win the race, ibvmnic_reset() could add
5938 * a reset after we purged but thats ok - we just may end
5939 * up with an extra reset (i.e similar to having two or more
5940 * resets in the queue at once).
5943 spin_lock_irqsave(&adapter->rwi_lock, flags);
5944 flush_reset_queue(adapter);
5945 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
5947 rc = init_crq_queue(adapter);
5949 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5951 goto ibmvnic_init_fail;
5954 rc = ibmvnic_reset_init(adapter, false);
5955 } while (rc == -EAGAIN);
5957 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
5958 * partner is not ready. CRQ is not active. When the partner becomes
5959 * ready, we will do the passive init reset.
5963 init_success = true;
5965 rc = init_stats_buffers(adapter);
5967 goto ibmvnic_init_fail;
5969 rc = init_stats_token(adapter);
5971 goto ibmvnic_stats_fail;
5973 rc = device_create_file(&dev->dev, &dev_attr_failover);
5975 goto ibmvnic_dev_file_err;
5977 netif_carrier_off(netdev);
5980 adapter->state = VNIC_PROBED;
5981 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5982 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5983 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5985 adapter->state = VNIC_DOWN;
5988 adapter->wait_for_reset = false;
5989 adapter->last_reset_time = jiffies;
5991 rc = register_netdev(netdev);
5993 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5994 goto ibmvnic_register_fail;
5996 dev_info(&dev->dev, "ibmvnic registered\n");
5998 complete(&adapter->probe_done);
6002 ibmvnic_register_fail:
6003 device_remove_file(&dev->dev, &dev_attr_failover);
6005 ibmvnic_dev_file_err:
6006 release_stats_token(adapter);
6009 release_stats_buffers(adapter);
6012 release_sub_crqs(adapter, 1);
6013 release_crq_queue(adapter);
6015 /* cleanup worker thread after releasing CRQ so we don't get
6016 * transport events (i.e new work items for the worker thread).
6018 adapter->state = VNIC_REMOVING;
6019 complete(&adapter->probe_done);
6020 flush_work(&adapter->ibmvnic_reset);
6021 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6023 flush_reset_queue(adapter);
6025 mutex_destroy(&adapter->fw_lock);
6026 free_netdev(netdev);
6031 static void ibmvnic_remove(struct vio_dev *dev)
6033 struct net_device *netdev = dev_get_drvdata(&dev->dev);
6034 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6035 unsigned long flags;
6037 spin_lock_irqsave(&adapter->state_lock, flags);
6039 /* If ibmvnic_reset() is scheduling a reset, wait for it to
6040 * finish. Then, set the state to REMOVING to prevent it from
6041 * scheduling any more work and to have reset functions ignore
6042 * any resets that have already been scheduled. Drop the lock
6043 * after setting state, so __ibmvnic_reset() which is called
6044 * from the flush_work() below, can make progress.
6046 spin_lock(&adapter->rwi_lock);
6047 adapter->state = VNIC_REMOVING;
6048 spin_unlock(&adapter->rwi_lock);
6050 spin_unlock_irqrestore(&adapter->state_lock, flags);
6052 flush_work(&adapter->ibmvnic_reset);
6053 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6056 unregister_netdevice(netdev);
6058 release_resources(adapter);
6059 release_rx_pools(adapter);
6060 release_tx_pools(adapter);
6061 release_sub_crqs(adapter, 1);
6062 release_crq_queue(adapter);
6064 release_stats_token(adapter);
6065 release_stats_buffers(adapter);
6067 adapter->state = VNIC_REMOVED;
6070 mutex_destroy(&adapter->fw_lock);
6071 device_remove_file(&dev->dev, &dev_attr_failover);
6072 free_netdev(netdev);
6073 dev_set_drvdata(&dev->dev, NULL);
6076 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
6077 const char *buf, size_t count)
6079 struct net_device *netdev = dev_get_drvdata(dev);
6080 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6081 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
6082 __be64 session_token;
6085 if (!sysfs_streq(buf, "1"))
6088 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
6089 H_GET_SESSION_TOKEN, 0, 0, 0);
6091 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
6096 session_token = (__be64)retbuf[0];
6097 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
6098 be64_to_cpu(session_token));
6099 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
6100 H_SESSION_ERR_DETECTED, session_token, 0, 0);
6103 "H_VIOCTL initiated failover failed, rc %ld\n",
6111 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
6112 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
6116 static DEVICE_ATTR_WO(failover);
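
/* Tell the platform how much IOMMU/DMA space the driver would like for its
 * CRQ page, statistics buffer, sub-CRQ queues and RX pools so that IO
 * entitlement can be sized accordingly.
 */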
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);