// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet. Subsequently, sCRQs are used by the server to notify the    */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
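
/* Illustrative sketch only (not part of the driver): with the long term
 * mapping scheme described above, the buffer lifecycle handled by the
 * helpers later in this file reduces to:
 *
 *	alloc_long_term_buff(adapter, &ltb, size);  // dma_alloc_coherent() +
 *						    // REQUEST_MAP CRQ to VIOS
 *	... copy skbs in and out of ltb.buff for the life of the device ...
 *	free_long_term_buff(adapter, &ltb);	    // REQUEST_UNMAP + free
 */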
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
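
/* Example (how the two macros above compose; hedged sketch): an ethtool
 * stats walker would read each counter roughly as
 *	data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 * i.e. a u64 load at the saved byte offset within struct ibmvnic_adapter.
 */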
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
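
/* Note: the sequence above (take fw_lock, clear fw_done_rc, reinit the
 * fw_done completion, send the CRQ, then ibmvnic_wait_for_completion())
 * is the driver's standard firmware-command handshake; the same pattern
 * reappears in ibmvnic_get_vpd() and __ibmvnic_set_mac() below.
 */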
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
{
	if (!ltb->buff)
		return -EINVAL;

	memset(ltb->buff, 0, ltb->size);
	return 0;
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;
	for (i = 0; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
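		/* Worked example, assuming a 4KB buffer on little endian:
		 * buff_size = 0x1000, so 0x1000 << 8 = 0x100000, and
		 * cpu_to_be32(0x100000) lays out as 00 10 00 00 - the first
		 * three bytes carry the 24-bit length 0x001000 (4096).
		 */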
		pool->next_free = (pool->next_free + 1) % pool->size;
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}
static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(&rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(&tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}
static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(&adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(&adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}
static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}
		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}
		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);
			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}
			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}
			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
	return 0;
}
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n",
			   adapter->state, adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}
static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}
static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}
static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
		   adapter->state, adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
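	/* hdr_field bit layout, as consumed here and in ibmvnic_xmit():
	 * bit 7 = any header descriptors needed at all, bit 6 = L2 header,
	 * bit 5 = L3 header, bit 4 = L4 header.
	 */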
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len = hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
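
/* Example of the chunking above: a 66-byte header block (14B L2 + 20B L3
 * + 32B L4) becomes three descriptors - the first 24 bytes ride in the
 * main header descriptor, the remaining 42 in two extension descriptors
 * of 29 and 13 bytes.
 */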
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}
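
/* Note: skb_put_padto() zero-pads the frame to the requested length and
 * frees the skb itself on failure, so a non-zero return from the helper
 * above means the skb has already been consumed and the caller must not
 * free it again.
 */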
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
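	/* __netdev_tx_sent_queue() does the BQL accounting and returns true
	 * when the queue must be flushed now; passing netdev_xmit_more()
	 * (while the indirect buffer still has room) as its xmit_more hint
	 * lets consecutive frames share one send_subcrq_indirect() doorbell.
	 */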
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}
/*
 * do_reset() returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev,
		   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
		   adapter->state, adapter->failover_pending,
		   rwi->reset_reason, reset_state);

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
			 */
			rtnl_unlock();
			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
			rtnl_lock();
			if (rc)
				goto out;

			if (adapter->state == VNIC_OPEN) {
				/* When we dropped rtnl, ibmvnic_open() got
				 * it and noticed that we are resetting and
				 * set the adapter state to OPEN. Update our
				 * new "target" state, and resume the reset
				 * from VNIC_CLOSING state.
				 */
				netdev_dbg(netdev,
					   "Open changed state from %d, updating.\n",
					   reset_state);
				reset_state = VNIC_OPEN;
				adapter->state = VNIC_CLOSING;
			}

			if (adapter->state != VNIC_CLOSING) {
				/* If someone else changed the adapter state
				 * when we dropped the rtnl, fail the reset
				 */
				rc = -1;
				goto out;
			}
			adapter->state = VNIC_CLOSED;
		}
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues ||
			   adapter->req_rx_add_entries_per_subcrq !=
			   old_num_rx_slots ||
			   adapter->req_tx_entries_per_subcrq !=
			   old_num_tx_slots ||
			   !adapter->rx_pool ||
			   !adapter->tso_pool ||
			   !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY)
		__netdev_notify_peers(netdev);
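	/* __netdev_notify_peers() (the rtnl-held variant of
	 * netdev_notify_peers()) fires NETDEV_NOTIFY_PEERS, prompting
	 * gratuitous ARP/ND so peers relearn where the MAC now lives after
	 * a failover or partition migration.
	 */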

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	/* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_unlock();

	netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
		   adapter->state, adapter->failover_pending, rc);

	return rc;
}
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	__netdev_notify_peers(netdev);
out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
		   adapter->state, adapter->failover_pending, rc);
	return rc;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool saved_state = false;
	unsigned long flags;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	if (test_and_set_bit_lock(0, &adapter->resetting)) {
		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
				      IBMVNIC_RESET_DELAY);
		return;
	}

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (!saved_state) {
			reset_state = adapter->state;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (adapter->force_reset_recovery) {
			/* Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
			adapter->failover_pending = false;

			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
			if (rc) {
				/* give backing device time to settle down */
				netdev_dbg(adapter->netdev,
					   "[S:%d] Hard reset failed, waiting 60 secs\n",
					   adapter->state);
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(60 * HZ);
			}
		} else {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		adapter->last_reset_time = jiffies;

		if (rc)
			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);

		rwi = get_next_rwi(adapter);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	clear_bit_unlock(0, &adapter->resetting);

	netdev_dbg(adapter->netdev,
		   "[S:%d FRR:%d WFR:%d] Done processing resets\n",
		   adapter->state, adapter->force_reset_recovery,
		   adapter->wait_for_reset);
}
2324 static void __ibmvnic_delayed_reset(struct work_struct *work)
2326 struct ibmvnic_adapter *adapter;
2328 adapter = container_of(work, struct ibmvnic_adapter,
2329 ibmvnic_delayed_reset.work);
2330 __ibmvnic_reset(&adapter->ibmvnic_reset);
2333 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2334 enum ibmvnic_reset_reason reason)
2336 struct list_head *entry, *tmp_entry;
2337 struct ibmvnic_rwi *rwi, *tmp;
2338 struct net_device *netdev = adapter->netdev;
2339 unsigned long flags;
2342 spin_lock_irqsave(&adapter->rwi_lock, flags);
2344 /* If failover is pending don't schedule any other reset.
2345 * Instead let the failover complete. If there is already a
2346 * failover reset scheduled, we will detect and drop the
2347 * duplicate reset when walking the ->rwi_list below.
2349 if (adapter->state == VNIC_REMOVING ||
2350 adapter->state == VNIC_REMOVED ||
2351 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2353 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2357 if (adapter->state == VNIC_PROBING) {
2358 netdev_warn(netdev, "Adapter reset during probe\n");
2359 adapter->init_done_rc = EAGAIN;
2364 list_for_each(entry, &adapter->rwi_list) {
2365 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2366 if (tmp->reset_reason == reason) {
2367 netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
2374 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2379 /* if we just received a transport event,
2380 * flush reset queue and process this reset
2382 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2383 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2386 rwi->reset_reason = reason;
2387 list_add_tail(&rwi->list, &adapter->rwi_list);
2388 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2389 schedule_work(&adapter->ibmvnic_reset);
2393 /* ibmvnic_close() below can block, so drop the lock first */
2394 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2397 ibmvnic_close(netdev);
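/* Editor's note (illustrative, not driver code): callers queue a reset
 * by reason and return immediately; serialization happens in
 * __ibmvnic_reset(). The firmware error handler later in this file does
 * exactly this:
 *
 *     if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
 *             ibmvnic_reset(adapter, VNIC_RESET_FATAL);
 *     else
 *             ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
 *
 * A reason already queued on ->rwi_list is detected above and dropped,
 * so calling this on every event is safe.
 */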
2402 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2404 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2406 if (test_bit(0, &adapter->resetting)) {
2407 netdev_err(adapter->netdev,
2408 "Adapter is resetting, skip timeout reset\n");
2411 /* Don't queue another reset until at least 5 seconds (the default
2412 * watchdog timeout) have passed since the last reset.
2413 */
2414 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2415 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2418 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
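/* Worked example (editor's note, assuming HZ == 100): with the default
 * 5 s watchdog, dev->watchdog_timeo is 500 jiffies. If last_reset_time
 * is 10000 and the watchdog fires at jiffies 10300, time_before(10300,
 * 10500) is true and the timeout is ignored; a watchdog firing at
 * jiffies 10600 queues VNIC_RESET_TIMEOUT.
 */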
2421 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2422 struct ibmvnic_rx_buff *rx_buff)
2424 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2426 rx_buff->skb = NULL;
2428 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2429 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2431 atomic_dec(&pool->available);
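/* Worked example (editor's note): with pool->size == 4 and
 * pool->next_alloc == 3, returning the buffer at index 1 stores
 * free_map[3] = 1 and advances next_alloc to (3 + 1) % 4 == 0, so the
 * free map behaves as a ring of recyclable buffer indices.
 */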
2434 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2436 struct ibmvnic_sub_crq_queue *rx_scrq;
2437 struct ibmvnic_adapter *adapter;
2438 struct net_device *netdev;
2439 int frames_processed;
2443 adapter = netdev_priv(netdev);
2444 scrq_num = (int)(napi - adapter->napi);
2445 frames_processed = 0;
2446 rx_scrq = adapter->rx_scrq[scrq_num];
2449 while (frames_processed < budget) {
2450 struct sk_buff *skb;
2451 struct ibmvnic_rx_buff *rx_buff;
2452 union sub_crq *next;
2457 if (unlikely(test_bit(0, &adapter->resetting) &&
2458 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2459 enable_scrq_irq(adapter, rx_scrq);
2460 napi_complete_done(napi, frames_processed);
2461 return frames_processed;
2464 if (!pending_scrq(adapter, rx_scrq))
2466 next = ibmvnic_next_scrq(adapter, rx_scrq);
2467 rx_buff = (struct ibmvnic_rx_buff *)
2468 be64_to_cpu(next->rx_comp.correlator);
2469 /* do error checking */
2470 if (next->rx_comp.rc) {
2471 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2472 be16_to_cpu(next->rx_comp.rc));
2473 /* free the entry */
2474 next->rx_comp.first = 0;
2475 dev_kfree_skb_any(rx_buff->skb);
2476 remove_buff_from_pool(adapter, rx_buff);
2478 } else if (!rx_buff->skb) {
2479 /* free the entry */
2480 next->rx_comp.first = 0;
2481 remove_buff_from_pool(adapter, rx_buff);
2485 length = be32_to_cpu(next->rx_comp.len);
2486 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2487 flags = next->rx_comp.flags;
2489 /* load long_term_buff before copying to skb */
2491 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2494 /* VLAN Header has been stripped by the system firmware and
2495 * needs to be inserted by the driver
2497 if (adapter->rx_vlan_header_insertion &&
2498 (flags & IBMVNIC_VLAN_STRIPPED))
2499 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2500 ntohs(next->rx_comp.vlan_tci));
2502 /* free the entry */
2503 next->rx_comp.first = 0;
2504 remove_buff_from_pool(adapter, rx_buff);
2506 skb_put(skb, length);
2507 skb->protocol = eth_type_trans(skb, netdev);
2508 skb_record_rx_queue(skb, scrq_num);
2510 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2511 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2512 skb->ip_summed = CHECKSUM_UNNECESSARY;
2516 napi_gro_receive(napi, skb); /* send it up */
2517 netdev->stats.rx_packets++;
2518 netdev->stats.rx_bytes += length;
2519 adapter->rx_stats_buffers[scrq_num].packets++;
2520 adapter->rx_stats_buffers[scrq_num].bytes += length;
2524 if (adapter->state != VNIC_CLOSING &&
2525 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2526 adapter->req_rx_add_entries_per_subcrq / 2) ||
2527 frames_processed < budget))
2528 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2529 if (frames_processed < budget) {
2530 if (napi_complete_done(napi, frames_processed)) {
2531 enable_scrq_irq(adapter, rx_scrq);
2532 if (pending_scrq(adapter, rx_scrq)) {
2533 if (napi_reschedule(napi)) {
2534 disable_scrq_irq(adapter, rx_scrq);
2540 return frames_processed;
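/* Editor's note on the re-arm idiom above: frames that arrive while the
 * interrupt is disabled raise no new interrupt, so the queue must be
 * re-checked after re-enabling. In outline:
 *
 *     if (done < budget && napi_complete_done(napi, done)) {
 *             enable_scrq_irq(adapter, rx_scrq);
 *             if (pending_scrq(adapter, rx_scrq) &&
 *                 napi_reschedule(napi))
 *                     disable_scrq_irq(adapter, rx_scrq);
 *     }
 */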
2543 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2547 adapter->fallback.mtu = adapter->req_mtu;
2548 adapter->fallback.rx_queues = adapter->req_rx_queues;
2549 adapter->fallback.tx_queues = adapter->req_tx_queues;
2550 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2551 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2553 reinit_completion(&adapter->reset_done);
2554 adapter->wait_for_reset = true;
2555 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2561 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2568 if (adapter->reset_done_rc) {
2570 adapter->desired.mtu = adapter->fallback.mtu;
2571 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2572 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2573 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2574 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2576 reinit_completion(&adapter->reset_done);
2577 adapter->wait_for_reset = true;
2578 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2583 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2591 adapter->wait_for_reset = false;
2596 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2598 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2600 adapter->desired.mtu = new_mtu + ETH_HLEN;
2602 return wait_for_reset(adapter);
2605 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2606 struct net_device *dev,
2607 netdev_features_t features)
2609 /* Some backing hardware adapters cannot
2610 * handle packets with an MSS less than 224
2611 * or with only one segment.
2613 if (skb_is_gso(skb)) {
2614 if (skb_shinfo(skb)->gso_size < 224 ||
2615 skb_shinfo(skb)->gso_segs == 1)
2616 features &= ~NETIF_F_GSO_MASK;
2622 static const struct net_device_ops ibmvnic_netdev_ops = {
2623 .ndo_open = ibmvnic_open,
2624 .ndo_stop = ibmvnic_close,
2625 .ndo_start_xmit = ibmvnic_xmit,
2626 .ndo_set_rx_mode = ibmvnic_set_multi,
2627 .ndo_set_mac_address = ibmvnic_set_mac,
2628 .ndo_validate_addr = eth_validate_addr,
2629 .ndo_tx_timeout = ibmvnic_tx_timeout,
2630 .ndo_change_mtu = ibmvnic_change_mtu,
2631 .ndo_features_check = ibmvnic_features_check,
2634 /* ethtool functions */
2636 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2637 struct ethtool_link_ksettings *cmd)
2639 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2642 rc = send_query_phys_parms(adapter);
2644 adapter->speed = SPEED_UNKNOWN;
2645 adapter->duplex = DUPLEX_UNKNOWN;
2647 cmd->base.speed = adapter->speed;
2648 cmd->base.duplex = adapter->duplex;
2649 cmd->base.port = PORT_FIBRE;
2650 cmd->base.phy_address = 0;
2651 cmd->base.autoneg = AUTONEG_ENABLE;
2656 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2657 struct ethtool_drvinfo *info)
2659 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2661 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2662 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2663 strscpy(info->fw_version, adapter->fw_version,
2664 sizeof(info->fw_version));
2667 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2669 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2671 return adapter->msg_enable;
2674 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2676 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2678 adapter->msg_enable = data;
2681 static u32 ibmvnic_get_link(struct net_device *netdev)
2683 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2685 /* Don't need to send a query because we request a logical link up at
2686 * init and then we wait for link state indications
2688 return adapter->logical_link_state;
2691 static void ibmvnic_get_ringparam(struct net_device *netdev,
2692 struct ethtool_ringparam *ring)
2694 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2696 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2697 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2698 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2700 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2701 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2703 ring->rx_mini_max_pending = 0;
2704 ring->rx_jumbo_max_pending = 0;
2705 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2706 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2707 ring->rx_mini_pending = 0;
2708 ring->rx_jumbo_pending = 0;
2711 static int ibmvnic_set_ringparam(struct net_device *netdev,
2712 struct ethtool_ringparam *ring)
2714 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2718 adapter->desired.rx_entries = ring->rx_pending;
2719 adapter->desired.tx_entries = ring->tx_pending;
2721 ret = wait_for_reset(adapter);
2724 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2725 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2727 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2728 ring->rx_pending, ring->tx_pending,
2729 adapter->req_rx_add_entries_per_subcrq,
2730 adapter->req_tx_entries_per_subcrq);
2734 static void ibmvnic_get_channels(struct net_device *netdev,
2735 struct ethtool_channels *channels)
2737 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2739 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2740 channels->max_rx = adapter->max_rx_queues;
2741 channels->max_tx = adapter->max_tx_queues;
2743 channels->max_rx = IBMVNIC_MAX_QUEUES;
2744 channels->max_tx = IBMVNIC_MAX_QUEUES;
2747 channels->max_other = 0;
2748 channels->max_combined = 0;
2749 channels->rx_count = adapter->req_rx_queues;
2750 channels->tx_count = adapter->req_tx_queues;
2751 channels->other_count = 0;
2752 channels->combined_count = 0;
2755 static int ibmvnic_set_channels(struct net_device *netdev,
2756 struct ethtool_channels *channels)
2758 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2762 adapter->desired.rx_queues = channels->rx_count;
2763 adapter->desired.tx_queues = channels->tx_count;
2765 ret = wait_for_reset(adapter);
2768 (adapter->req_rx_queues != channels->rx_count ||
2769 adapter->req_tx_queues != channels->tx_count))
2771 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2772 channels->rx_count, channels->tx_count,
2773 adapter->req_rx_queues, adapter->req_tx_queues);
2777 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2779 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2782 switch (stringset) {
2784 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2785 i++, data += ETH_GSTRING_LEN)
2786 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2788 for (i = 0; i < adapter->req_tx_queues; i++) {
2789 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2790 data += ETH_GSTRING_LEN;
2792 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2793 data += ETH_GSTRING_LEN;
2795 snprintf(data, ETH_GSTRING_LEN,
2796 "tx%d_dropped_packets", i);
2797 data += ETH_GSTRING_LEN;
2800 for (i = 0; i < adapter->req_rx_queues; i++) {
2801 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2802 data += ETH_GSTRING_LEN;
2804 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2805 data += ETH_GSTRING_LEN;
2807 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2808 data += ETH_GSTRING_LEN;
2812 case ETH_SS_PRIV_FLAGS:
2813 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2814 strcpy(data + i * ETH_GSTRING_LEN,
2815 ibmvnic_priv_flags[i]);
2822 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2824 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2828 return ARRAY_SIZE(ibmvnic_stats) +
2829 adapter->req_tx_queues * NUM_TX_STATS +
2830 adapter->req_rx_queues * NUM_RX_STATS;
2831 case ETH_SS_PRIV_FLAGS:
2832 return ARRAY_SIZE(ibmvnic_priv_flags);
2838 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2839 struct ethtool_stats *stats, u64 *data)
2841 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2842 union ibmvnic_crq crq;
2846 memset(&crq, 0, sizeof(crq));
2847 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2848 crq.request_statistics.cmd = REQUEST_STATISTICS;
2849 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2850 crq.request_statistics.len =
2851 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2853 /* Wait for data to be written */
2854 reinit_completion(&adapter->stats_done);
2855 rc = ibmvnic_send_crq(adapter, &crq);
2858 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2862 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2863 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
2864 (adapter, ibmvnic_stats[i].offset));
2866 for (j = 0; j < adapter->req_tx_queues; j++) {
2867 data[i] = adapter->tx_stats_buffers[j].packets;
2869 data[i] = adapter->tx_stats_buffers[j].bytes;
2871 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2875 for (j = 0; j < adapter->req_rx_queues; j++) {
2876 data[i] = adapter->rx_stats_buffers[j].packets;
2878 data[i] = adapter->rx_stats_buffers[j].bytes;
2880 data[i] = adapter->rx_stats_buffers[j].interrupts;
2885 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2887 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2889 return adapter->priv_flags;
2892 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2894 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2895 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2898 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2900 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2905 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2906 .get_drvinfo = ibmvnic_get_drvinfo,
2907 .get_msglevel = ibmvnic_get_msglevel,
2908 .set_msglevel = ibmvnic_set_msglevel,
2909 .get_link = ibmvnic_get_link,
2910 .get_ringparam = ibmvnic_get_ringparam,
2911 .set_ringparam = ibmvnic_set_ringparam,
2912 .get_channels = ibmvnic_get_channels,
2913 .set_channels = ibmvnic_set_channels,
2914 .get_strings = ibmvnic_get_strings,
2915 .get_sset_count = ibmvnic_get_sset_count,
2916 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2917 .get_link_ksettings = ibmvnic_get_link_ksettings,
2918 .get_priv_flags = ibmvnic_get_priv_flags,
2919 .set_priv_flags = ibmvnic_set_priv_flags,
2922 /* Routines for managing CRQs/sCRQs */
2924 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2925 struct ibmvnic_sub_crq_queue *scrq)
2930 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
2935 free_irq(scrq->irq, scrq);
2936 irq_dispose_mapping(scrq->irq);
2941 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2942 atomic_set(&scrq->used, 0);
2944 scrq->ind_buf.index = 0;
2946 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2950 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2951 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2955 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2959 if (!adapter->tx_scrq || !adapter->rx_scrq)
2962 for (i = 0; i < adapter->req_tx_queues; i++) {
2963 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2964 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2969 for (i = 0; i < adapter->req_rx_queues; i++) {
2970 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2971 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2979 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2980 struct ibmvnic_sub_crq_queue *scrq,
2983 struct device *dev = &adapter->vdev->dev;
2986 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2989 /* Close the sub-crqs */
2991 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2992 adapter->vdev->unit_address,
2994 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2997 netdev_err(adapter->netdev,
2998 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3003 dma_free_coherent(dev,
3005 scrq->ind_buf.indir_arr,
3006 scrq->ind_buf.indir_dma);
3008 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3010 free_pages((unsigned long)scrq->msgs, 2);
3014 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3017 struct device *dev = &adapter->vdev->dev;
3018 struct ibmvnic_sub_crq_queue *scrq;
3021 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3026 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3028 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3029 goto zero_page_failed;
3032 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3034 if (dma_mapping_error(dev, scrq->msg_token)) {
3035 dev_warn(dev, "Couldn't map crq queue messages page\n");
3039 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3040 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3042 if (rc == H_RESOURCE)
3043 rc = ibmvnic_reset_crq(adapter);
3045 if (rc == H_CLOSED) {
3046 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3048 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3052 scrq->adapter = adapter;
3053 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3054 scrq->ind_buf.index = 0;
3056 scrq->ind_buf.indir_arr =
3057 dma_alloc_coherent(dev,
3059 &scrq->ind_buf.indir_dma,
3062 if (!scrq->ind_buf.indir_arr)
3065 spin_lock_init(&scrq->lock);
3067 netdev_dbg(adapter->netdev,
3068 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3069 scrq->crq_num, scrq->hw_irq, scrq->irq);
3075 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3076 adapter->vdev->unit_address,
3078 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3080 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3083 free_pages((unsigned long)scrq->msgs, 2);
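/* Worked example (editor's note): the message area is an order-2 page
 * allocation, i.e. 4 * PAGE_SIZE bytes. With 4 KiB pages and 32-byte
 * sub-CRQ descriptors, scrq->size is 4 * 4096 / 32 == 512 entries;
 * with 64 KiB pages it is 8192 entries.
 */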
3090 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3094 if (adapter->tx_scrq) {
3095 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3096 if (!adapter->tx_scrq[i])
3099 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3101 if (adapter->tx_scrq[i]->irq) {
3102 free_irq(adapter->tx_scrq[i]->irq,
3103 adapter->tx_scrq[i]);
3104 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3105 adapter->tx_scrq[i]->irq = 0;
3108 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3112 kfree(adapter->tx_scrq);
3113 adapter->tx_scrq = NULL;
3114 adapter->num_active_tx_scrqs = 0;
3117 if (adapter->rx_scrq) {
3118 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3119 if (!adapter->rx_scrq[i])
3122 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3124 if (adapter->rx_scrq[i]->irq) {
3125 free_irq(adapter->rx_scrq[i]->irq,
3126 adapter->rx_scrq[i]);
3127 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3128 adapter->rx_scrq[i]->irq = 0;
3131 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3135 kfree(adapter->rx_scrq);
3136 adapter->rx_scrq = NULL;
3137 adapter->num_active_rx_scrqs = 0;
3141 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3142 struct ibmvnic_sub_crq_queue *scrq)
3144 struct device *dev = &adapter->vdev->dev;
3147 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3148 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3150 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3155 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3156 struct ibmvnic_sub_crq_queue *scrq)
3158 struct device *dev = &adapter->vdev->dev;
3161 if (scrq->hw_irq > 0x100000000ULL) {
3162 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3166 if (test_bit(0, &adapter->resetting) &&
3167 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3168 u64 val = (0xff000000) | scrq->hw_irq;
3170 rc = plpar_hcall_norets(H_EOI, val);
3171 /* H_EOI would fail with rc = H_FUNCTION when running
3172 * in XIVE mode which is expected, but not an error.
3174 if (rc && rc != H_FUNCTION)
3175 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3179 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3180 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3182 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3187 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3188 struct ibmvnic_sub_crq_queue *scrq)
3190 struct device *dev = &adapter->vdev->dev;
3191 struct ibmvnic_tx_pool *tx_pool;
3192 struct ibmvnic_tx_buff *txbuff;
3193 struct netdev_queue *txq;
3194 union sub_crq *next;
3199 while (pending_scrq(adapter, scrq)) {
3200 unsigned int pool = scrq->pool_index;
3201 int num_entries = 0;
3202 int total_bytes = 0;
3203 int num_packets = 0;
3205 next = ibmvnic_next_scrq(adapter, scrq);
3206 for (i = 0; i < next->tx_comp.num_comps; i++) {
3207 index = be32_to_cpu(next->tx_comp.correlators[i]);
3208 if (index & IBMVNIC_TSO_POOL_MASK) {
3209 tx_pool = &adapter->tso_pool[pool];
3210 index &= ~IBMVNIC_TSO_POOL_MASK;
3212 tx_pool = &adapter->tx_pool[pool];
3215 txbuff = &tx_pool->tx_buff[index];
3217 num_entries += txbuff->num_entries;
3219 total_bytes += txbuff->skb->len;
3220 if (next->tx_comp.rcs[i]) {
3221 dev_err(dev, "tx error %x\n",
3222 next->tx_comp.rcs[i]);
3223 dev_kfree_skb_irq(txbuff->skb);
3225 dev_consume_skb_irq(txbuff->skb);
3229 netdev_warn(adapter->netdev,
3230 "TX completion received with NULL socket buffer\n");
3232 tx_pool->free_map[tx_pool->producer_index] = index;
3233 tx_pool->producer_index =
3234 (tx_pool->producer_index + 1) %
3235 tx_pool->num_buffers;
3237 /* remove tx_comp scrq */
3238 next->tx_comp.first = 0;
3240 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3241 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3243 if (atomic_sub_return(num_entries, &scrq->used) <=
3244 (adapter->req_tx_entries_per_subcrq / 2) &&
3245 __netif_subqueue_stopped(adapter->netdev,
3246 scrq->pool_index)) {
3247 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3248 netdev_dbg(adapter->netdev, "Started queue %d\n",
3253 enable_scrq_irq(adapter, scrq);
3255 if (pending_scrq(adapter, scrq)) {
3256 disable_scrq_irq(adapter, scrq);
3263 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3265 struct ibmvnic_sub_crq_queue *scrq = instance;
3266 struct ibmvnic_adapter *adapter = scrq->adapter;
3268 disable_scrq_irq(adapter, scrq);
3269 ibmvnic_complete_tx(adapter, scrq);
3274 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3276 struct ibmvnic_sub_crq_queue *scrq = instance;
3277 struct ibmvnic_adapter *adapter = scrq->adapter;
3279 /* When booting a kdump kernel we can hit pending interrupts
3280 * prior to completing driver initialization.
3282 if (unlikely(adapter->state != VNIC_OPEN))
3285 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3287 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3288 disable_scrq_irq(adapter, scrq);
3289 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3295 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3297 struct device *dev = &adapter->vdev->dev;
3298 struct ibmvnic_sub_crq_queue *scrq;
3302 for (i = 0; i < adapter->req_tx_queues; i++) {
3303 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3305 scrq = adapter->tx_scrq[i];
3306 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3310 dev_err(dev, "Error mapping irq\n");
3311 goto req_tx_irq_failed;
3314 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3315 adapter->vdev->unit_address, i);
3316 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3317 0, scrq->name, scrq);
3320 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3322 irq_dispose_mapping(scrq->irq);
3323 goto req_tx_irq_failed;
3327 for (i = 0; i < adapter->req_rx_queues; i++) {
3328 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3330 scrq = adapter->rx_scrq[i];
3331 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3334 dev_err(dev, "Error mapping irq\n");
3335 goto req_rx_irq_failed;
3337 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3338 adapter->vdev->unit_address, i);
3339 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3340 0, scrq->name, scrq);
3342 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3344 irq_dispose_mapping(scrq->irq);
3345 goto req_rx_irq_failed;
3351 for (j = 0; j < i; j++) {
3352 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3353 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3355 i = adapter->req_tx_queues;
3357 for (j = 0; j < i; j++) {
3358 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3359 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3361 release_sub_crqs(adapter, 1);
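/* Editor's note: with a (hypothetical) unit address of 0x30000002, the
 * request_irq() calls above register handlers that appear in
 * /proc/interrupts as "ibmvnic-30000002-tx0", "ibmvnic-30000002-rx0",
 * and so on, one entry per sub-CRQ.
 */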
3365 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3367 struct device *dev = &adapter->vdev->dev;
3368 struct ibmvnic_sub_crq_queue **allqueues;
3369 int registered_queues = 0;
3374 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3376 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3380 for (i = 0; i < total_queues; i++) {
3381 allqueues[i] = init_sub_crq_queue(adapter);
3382 if (!allqueues[i]) {
3383 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3386 registered_queues++;
3389 /* Make sure we were able to register the minimum number of queues */
3390 if (registered_queues <
3391 adapter->min_tx_queues + adapter->min_rx_queues) {
3392 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3396 /* Distribute the shortfall among the requested queue counts */
3397 for (i = 0; i < total_queues - registered_queues + more; i++) {
3398 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3401 if (adapter->req_rx_queues > adapter->min_rx_queues)
3402 adapter->req_rx_queues--;
3407 if (adapter->req_tx_queues > adapter->min_tx_queues)
3408 adapter->req_tx_queues--;
3415 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3416 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3417 if (!adapter->tx_scrq)
3420 for (i = 0; i < adapter->req_tx_queues; i++) {
3421 adapter->tx_scrq[i] = allqueues[i];
3422 adapter->tx_scrq[i]->pool_index = i;
3423 adapter->num_active_tx_scrqs++;
3426 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3427 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3428 if (!adapter->rx_scrq)
3431 for (i = 0; i < adapter->req_rx_queues; i++) {
3432 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3433 adapter->rx_scrq[i]->scrq_num = i;
3434 adapter->num_active_rx_scrqs++;
3441 kfree(adapter->tx_scrq);
3442 adapter->tx_scrq = NULL;
3444 for (i = 0; i < registered_queues; i++)
3445 release_sub_crq_queue(adapter, allqueues[i], 1);
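/* Worked example (editor's note): if 4 TX + 4 RX queues were requested
 * but only 6 sub-CRQs could be registered, the loop above trims
 * total_queues - registered_queues == 2 queues, alternating between RX
 * and TX while respecting the min_* floors, e.g. ending at 3 TX + 3 RX.
 */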
3450 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3452 struct device *dev = &adapter->vdev->dev;
3453 union ibmvnic_crq crq;
3457 /* Sub-CRQ entries are 32 bytes long */
3458 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3460 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3461 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3462 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3466 if (adapter->desired.mtu)
3467 adapter->req_mtu = adapter->desired.mtu;
3469 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3471 if (!adapter->desired.tx_entries)
3472 adapter->desired.tx_entries =
3473 adapter->max_tx_entries_per_subcrq;
3474 if (!adapter->desired.rx_entries)
3475 adapter->desired.rx_entries =
3476 adapter->max_rx_add_entries_per_subcrq;
3478 max_entries = IBMVNIC_MAX_LTB_SIZE /
3479 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3481 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3482 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3483 adapter->desired.tx_entries = max_entries;
3486 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3487 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3488 adapter->desired.rx_entries = max_entries;
3491 if (adapter->desired.tx_entries)
3492 adapter->req_tx_entries_per_subcrq =
3493 adapter->desired.tx_entries;
3495 adapter->req_tx_entries_per_subcrq =
3496 adapter->max_tx_entries_per_subcrq;
3498 if (adapter->desired.rx_entries)
3499 adapter->req_rx_add_entries_per_subcrq =
3500 adapter->desired.rx_entries;
3502 adapter->req_rx_add_entries_per_subcrq =
3503 adapter->max_rx_add_entries_per_subcrq;
3505 if (adapter->desired.tx_queues)
3506 adapter->req_tx_queues =
3507 adapter->desired.tx_queues;
3509 adapter->req_tx_queues =
3510 adapter->opt_tx_comp_sub_queues;
3512 if (adapter->desired.rx_queues)
3513 adapter->req_rx_queues =
3514 adapter->desired.rx_queues;
3516 adapter->req_rx_queues =
3517 adapter->opt_rx_comp_queues;
3519 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3522 memset(&crq, 0, sizeof(crq));
3523 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3524 crq.request_capability.cmd = REQUEST_CAPABILITY;
3526 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3527 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3528 atomic_inc(&adapter->running_cap_crqs);
3529 ibmvnic_send_crq(adapter, &crq);
3531 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3532 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3533 atomic_inc(&adapter->running_cap_crqs);
3534 ibmvnic_send_crq(adapter, &crq);
3536 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3537 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3538 atomic_inc(&adapter->running_cap_crqs);
3539 ibmvnic_send_crq(adapter, &crq);
3541 crq.request_capability.capability =
3542 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3543 crq.request_capability.number =
3544 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3545 atomic_inc(&adapter->running_cap_crqs);
3546 ibmvnic_send_crq(adapter, &crq);
3548 crq.request_capability.capability =
3549 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3550 crq.request_capability.number =
3551 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3552 atomic_inc(&adapter->running_cap_crqs);
3553 ibmvnic_send_crq(adapter, &crq);
3555 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3556 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3557 atomic_inc(&adapter->running_cap_crqs);
3558 ibmvnic_send_crq(adapter, &crq);
3560 if (adapter->netdev->flags & IFF_PROMISC) {
3561 if (adapter->promisc_supported) {
3562 crq.request_capability.capability =
3563 cpu_to_be16(PROMISC_REQUESTED);
3564 crq.request_capability.number = cpu_to_be64(1);
3565 atomic_inc(&adapter->running_cap_crqs);
3566 ibmvnic_send_crq(adapter, &crq);
3569 crq.request_capability.capability =
3570 cpu_to_be16(PROMISC_REQUESTED);
3571 crq.request_capability.number = cpu_to_be64(0);
3572 atomic_inc(&adapter->running_cap_crqs);
3573 ibmvnic_send_crq(adapter, &crq);
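/* Worked example (editor's note): ring sizes are clamped so that one
 * long term buffer never exceeds IBMVNIC_MAX_LTB_SIZE. Assuming
 * (hypothetically) an LTB limit of 8 MiB and req_mtu +
 * IBMVNIC_BUFFER_HLEN == 2048 bytes, max_entries is 8 MiB / 2048 ==
 * 4096, so a desired.rx_entries of 8192 is reduced to 4096 before the
 * REQ_RX_ADD_ENTRIES_PER_SUBCRQ request is sent.
 */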
3577 static int pending_scrq(struct ibmvnic_adapter *adapter,
3578 struct ibmvnic_sub_crq_queue *scrq)
3580 union sub_crq *entry = &scrq->msgs[scrq->cur];
3583 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3585 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3586 * contents of the SCRQ descriptor
3587 */
3588 dma_rmb();
3590 return rc;
3593 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3594 struct ibmvnic_sub_crq_queue *scrq)
3596 union sub_crq *entry;
3597 unsigned long flags;
3599 spin_lock_irqsave(&scrq->lock, flags);
3600 entry = &scrq->msgs[scrq->cur];
3601 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3602 if (++scrq->cur == scrq->size)
3607 spin_unlock_irqrestore(&scrq->lock, flags);
3609 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3610 * contents of the SCRQ descriptor
3611 */
3612 dma_rmb();
3614 return entry;
3617 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3619 struct ibmvnic_crq_queue *queue = &adapter->crq;
3620 union ibmvnic_crq *crq;
3622 crq = &queue->msgs[queue->cur];
3623 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3624 if (++queue->cur == queue->size)
3633 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3637 dev_warn_ratelimited(dev,
3638 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3642 dev_warn_ratelimited(dev,
3643 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3647 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3652 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3653 u64 remote_handle, u64 ioba, u64 num_entries)
3655 unsigned int ua = adapter->vdev->unit_address;
3656 struct device *dev = &adapter->vdev->dev;
3659 /* Make sure the hypervisor sees the complete request */
3661 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3662 cpu_to_be64(remote_handle),
3666 print_subcrq_error(dev, rc, __func__);
3671 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3672 union ibmvnic_crq *crq)
3674 unsigned int ua = adapter->vdev->unit_address;
3675 struct device *dev = &adapter->vdev->dev;
3676 u64 *u64_crq = (u64 *)crq;
3679 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3680 (unsigned long)cpu_to_be64(u64_crq[0]),
3681 (unsigned long)cpu_to_be64(u64_crq[1]));
3683 if (!adapter->crq.active &&
3684 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3685 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3689 /* Make sure the hypervisor sees the complete request */
3692 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3693 cpu_to_be64(u64_crq[0]),
3694 cpu_to_be64(u64_crq[1]));
3697 if (rc == H_CLOSED) {
3698 dev_warn(dev, "CRQ Queue closed\n");
3699 /* do not reset, report the failure, wait for passive init from server */
3702 dev_warn(dev, "Send error (rc=%d)\n", rc);
3708 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3710 struct device *dev = &adapter->vdev->dev;
3711 union ibmvnic_crq crq;
3715 memset(&crq, 0, sizeof(crq));
3716 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3717 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3718 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3721 rc = ibmvnic_send_crq(adapter, &crq);
3727 } while (retries > 0);
3730 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3737 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3739 union ibmvnic_crq crq;
3741 memset(&crq, 0, sizeof(crq));
3742 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3743 crq.version_exchange.cmd = VERSION_EXCHANGE;
3744 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3746 return ibmvnic_send_crq(adapter, &crq);
3749 struct vnic_login_client_data {
3750 u8 type;
3751 __be16 len;
3752 char name[];
3753 } __packed;
3755 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3759 /* Calculate the amount of buffer space needed for the
3760 * vnic client data in the login buffer. There are four entries,
3761 * OS name, LPAR name, device name, and a null last entry.
3763 len = 4 * sizeof(struct vnic_login_client_data);
3764 len += 6; /* "Linux" plus NULL */
3765 len += strlen(utsname()->nodename) + 1;
3766 len += strlen(adapter->netdev->name) + 1;
3771 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3772 struct vnic_login_client_data *vlcd)
3774 const char *os_name = "Linux";
3777 /* Type 1 - LPAR OS */
3779 len = strlen(os_name) + 1;
3780 vlcd->len = cpu_to_be16(len);
3781 strncpy(vlcd->name, os_name, len);
3782 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3784 /* Type 2 - LPAR name */
3786 len = strlen(utsname()->nodename) + 1;
3787 vlcd->len = cpu_to_be16(len);
3788 strncpy(vlcd->name, utsname()->nodename, len);
3789 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3791 /* Type 3 - device name */
3793 len = strlen(adapter->netdev->name) + 1;
3794 vlcd->len = cpu_to_be16(len);
3795 strncpy(vlcd->name, adapter->netdev->name, len);
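/* Editor's note: the client data written above is a packed sequence of
 * (type, len, name) records. For nodename "lpar1" and device "eth0"
 * (lengths include the terminating NUL):
 *
 *     | 1 | 6 | "Linux" | 2 | 6 | "lpar1" | 3 | 5 | "eth0" |
 *
 * The zeroed fourth entry accounted for in vnic_client_data_len()
 * terminates the list.
 */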
3798 static int send_login(struct ibmvnic_adapter *adapter)
3800 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3801 struct ibmvnic_login_buffer *login_buffer;
3802 struct device *dev = &adapter->vdev->dev;
3803 struct vnic_login_client_data *vlcd;
3804 dma_addr_t rsp_buffer_token;
3805 dma_addr_t buffer_token;
3806 size_t rsp_buffer_size;
3807 union ibmvnic_crq crq;
3808 int client_data_len;
3815 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3816 netdev_err(adapter->netdev,
3817 "RX or TX queues are not allocated, device login failed\n");
3821 release_login_buffer(adapter);
3822 release_login_rsp_buffer(adapter);
3824 client_data_len = vnic_client_data_len(adapter);
3827 sizeof(struct ibmvnic_login_buffer) +
3828 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3831 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3833 goto buf_alloc_failed;
3835 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3837 if (dma_mapping_error(dev, buffer_token)) {
3838 dev_err(dev, "Couldn't map login buffer\n");
3839 goto buf_map_failed;
3842 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3843 sizeof(u64) * adapter->req_tx_queues +
3844 sizeof(u64) * adapter->req_rx_queues +
3845 sizeof(u64) * adapter->req_rx_queues +
3846 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3848 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3849 if (!login_rsp_buffer)
3850 goto buf_rsp_alloc_failed;
3852 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3853 rsp_buffer_size, DMA_FROM_DEVICE);
3854 if (dma_mapping_error(dev, rsp_buffer_token)) {
3855 dev_err(dev, "Couldn't map login rsp buffer\n");
3856 goto buf_rsp_map_failed;
3859 adapter->login_buf = login_buffer;
3860 adapter->login_buf_token = buffer_token;
3861 adapter->login_buf_sz = buffer_size;
3862 adapter->login_rsp_buf = login_rsp_buffer;
3863 adapter->login_rsp_buf_token = rsp_buffer_token;
3864 adapter->login_rsp_buf_sz = rsp_buffer_size;
3866 login_buffer->len = cpu_to_be32(buffer_size);
3867 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3868 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3869 login_buffer->off_txcomp_subcrqs =
3870 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3871 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3872 login_buffer->off_rxcomp_subcrqs =
3873 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3874 sizeof(u64) * adapter->req_tx_queues);
3875 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3876 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3878 tx_list_p = (__be64 *)((char *)login_buffer +
3879 sizeof(struct ibmvnic_login_buffer));
3880 rx_list_p = (__be64 *)((char *)login_buffer +
3881 sizeof(struct ibmvnic_login_buffer) +
3882 sizeof(u64) * adapter->req_tx_queues);
3884 for (i = 0; i < adapter->req_tx_queues; i++) {
3885 if (adapter->tx_scrq[i]) {
3887 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
3891 for (i = 0; i < adapter->req_rx_queues; i++) {
3892 if (adapter->rx_scrq[i]) {
3894 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
3898 /* Insert vNIC login client data */
3899 vlcd = (struct vnic_login_client_data *)
3900 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3901 login_buffer->client_data_offset =
3902 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3903 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3905 vnic_add_client_data(adapter, vlcd);
3907 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3908 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3909 netdev_dbg(adapter->netdev, "%016lx\n",
3910 ((unsigned long *)(adapter->login_buf))[i]);
3913 memset(&crq, 0, sizeof(crq));
3914 crq.login.first = IBMVNIC_CRQ_CMD;
3915 crq.login.cmd = LOGIN;
3916 crq.login.ioba = cpu_to_be32(buffer_token);
3917 crq.login.len = cpu_to_be32(buffer_size);
3919 adapter->login_pending = true;
3920 rc = ibmvnic_send_crq(adapter, &crq);
3922 adapter->login_pending = false;
3923 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
3924 goto buf_rsp_map_failed;
3930 kfree(login_rsp_buffer);
3931 adapter->login_rsp_buf = NULL;
3932 buf_rsp_alloc_failed:
3933 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3935 kfree(login_buffer);
3936 adapter->login_buf = NULL;
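/* Editor's note: layout of the login buffer built above, with offsets
 * as programmed into its header fields:
 *
 *     0                   struct ibmvnic_login_buffer header
 *     off_txcomp_subcrqs  req_tx_queues u64 TX sub-CRQ numbers
 *     off_rxcomp_subcrqs  req_rx_queues u64 RX sub-CRQ numbers
 *     client_data_offset  client_data_len bytes of client data
 */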
3941 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3944 union ibmvnic_crq crq;
3946 memset(&crq, 0, sizeof(crq));
3947 crq.request_map.first = IBMVNIC_CRQ_CMD;
3948 crq.request_map.cmd = REQUEST_MAP;
3949 crq.request_map.map_id = map_id;
3950 crq.request_map.ioba = cpu_to_be32(addr);
3951 crq.request_map.len = cpu_to_be32(len);
3952 return ibmvnic_send_crq(adapter, &crq);
3955 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3957 union ibmvnic_crq crq;
3959 memset(&crq, 0, sizeof(crq));
3960 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3961 crq.request_unmap.cmd = REQUEST_UNMAP;
3962 crq.request_unmap.map_id = map_id;
3963 return ibmvnic_send_crq(adapter, &crq);
3966 static void send_query_map(struct ibmvnic_adapter *adapter)
3968 union ibmvnic_crq crq;
3970 memset(&crq, 0, sizeof(crq));
3971 crq.query_map.first = IBMVNIC_CRQ_CMD;
3972 crq.query_map.cmd = QUERY_MAP;
3973 ibmvnic_send_crq(adapter, &crq);
3976 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3977 static void send_query_cap(struct ibmvnic_adapter *adapter)
3979 union ibmvnic_crq crq;
3981 atomic_set(&adapter->running_cap_crqs, 0);
3982 memset(&crq, 0, sizeof(crq));
3983 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3984 crq.query_capability.cmd = QUERY_CAPABILITY;
3986 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3987 atomic_inc(&adapter->running_cap_crqs);
3988 ibmvnic_send_crq(adapter, &crq);
3990 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3991 atomic_inc(&adapter->running_cap_crqs);
3992 ibmvnic_send_crq(adapter, &crq);
3994 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3995 atomic_inc(&adapter->running_cap_crqs);
3996 ibmvnic_send_crq(adapter, &crq);
3998 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3999 atomic_inc(&adapter->running_cap_crqs);
4000 ibmvnic_send_crq(adapter, &crq);
4002 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4003 atomic_inc(&adapter->running_cap_crqs);
4004 ibmvnic_send_crq(adapter, &crq);
4006 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4007 atomic_inc(&adapter->running_cap_crqs);
4008 ibmvnic_send_crq(adapter, &crq);
4010 crq.query_capability.capability =
4011 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4012 atomic_inc(&adapter->running_cap_crqs);
4013 ibmvnic_send_crq(adapter, &crq);
4015 crq.query_capability.capability =
4016 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4017 atomic_inc(&adapter->running_cap_crqs);
4018 ibmvnic_send_crq(adapter, &crq);
4020 crq.query_capability.capability =
4021 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4022 atomic_inc(&adapter->running_cap_crqs);
4023 ibmvnic_send_crq(adapter, &crq);
4025 crq.query_capability.capability =
4026 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4027 atomic_inc(&adapter->running_cap_crqs);
4028 ibmvnic_send_crq(adapter, &crq);
4030 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4031 atomic_inc(&adapter->running_cap_crqs);
4032 ibmvnic_send_crq(adapter, &crq);
4034 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4035 atomic_inc(&adapter->running_cap_crqs);
4036 ibmvnic_send_crq(adapter, &crq);
4038 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4039 atomic_inc(&adapter->running_cap_crqs);
4040 ibmvnic_send_crq(adapter, &crq);
4042 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4043 atomic_inc(&adapter->running_cap_crqs);
4044 ibmvnic_send_crq(adapter, &crq);
4046 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4047 atomic_inc(&adapter->running_cap_crqs);
4048 ibmvnic_send_crq(adapter, &crq);
4050 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4051 atomic_inc(&adapter->running_cap_crqs);
4052 ibmvnic_send_crq(adapter, &crq);
4054 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4055 atomic_inc(&adapter->running_cap_crqs);
4056 ibmvnic_send_crq(adapter, &crq);
4058 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4059 atomic_inc(&adapter->running_cap_crqs);
4060 ibmvnic_send_crq(adapter, &crq);
4062 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4063 atomic_inc(&adapter->running_cap_crqs);
4064 ibmvnic_send_crq(adapter, &crq);
4066 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4067 atomic_inc(&adapter->running_cap_crqs);
4068 ibmvnic_send_crq(adapter, &crq);
4070 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4071 atomic_inc(&adapter->running_cap_crqs);
4072 ibmvnic_send_crq(adapter, &crq);
4074 crq.query_capability.capability =
4075 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4076 atomic_inc(&adapter->running_cap_crqs);
4077 ibmvnic_send_crq(adapter, &crq);
4079 crq.query_capability.capability =
4080 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4081 atomic_inc(&adapter->running_cap_crqs);
4082 ibmvnic_send_crq(adapter, &crq);
4084 crq.query_capability.capability =
4085 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4086 atomic_inc(&adapter->running_cap_crqs);
4087 ibmvnic_send_crq(adapter, &crq);
4089 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4090 atomic_inc(&adapter->running_cap_crqs);
4091 ibmvnic_send_crq(adapter, &crq);
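/* Editor's sketch (hypothetical refactor, not the driver's code): each
 * send above differs only in the capability id, so the sequence could
 * be table driven:
 *
 *     static const u16 caps[] = {
 *             MIN_TX_QUEUES, MIN_RX_QUEUES, MIN_RX_ADD_QUEUES,
 *             MAX_TX_QUEUES, MAX_RX_QUEUES, MAX_RX_ADD_QUEUES,
 *             TCP_IP_OFFLOAD, PROMISC_SUPPORTED, MIN_MTU, MAX_MTU,
 *     };
 *     int i;
 *
 *     for (i = 0; i < ARRAY_SIZE(caps); i++) {
 *             crq.query_capability.capability = cpu_to_be16(caps[i]);
 *             atomic_inc(&adapter->running_cap_crqs);
 *             ibmvnic_send_crq(adapter, &crq);
 *     }
 */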
4094 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4096 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4097 struct device *dev = &adapter->vdev->dev;
4098 union ibmvnic_crq crq;
4100 adapter->ip_offload_tok =
4102 &adapter->ip_offload_buf,
4106 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4107 if (!firmware_has_feature(FW_FEATURE_CMO))
4108 dev_err(dev, "Couldn't map offload buffer\n");
4112 memset(&crq, 0, sizeof(crq));
4113 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4114 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4115 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4116 crq.query_ip_offload.ioba =
4117 cpu_to_be32(adapter->ip_offload_tok);
4119 ibmvnic_send_crq(adapter, &crq);
4122 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4124 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4125 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4126 struct device *dev = &adapter->vdev->dev;
4127 netdev_features_t old_hw_features = 0;
4128 union ibmvnic_crq crq;
4130 adapter->ip_offload_ctrl_tok =
4133 sizeof(adapter->ip_offload_ctrl),
4136 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4137 dev_err(dev, "Couldn't map ip offload control buffer\n");
4141 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4142 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4143 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4144 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4145 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4146 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4147 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4148 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4149 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4150 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4152 /* large_rx disabled for now, additional features needed */
4153 ctrl_buf->large_rx_ipv4 = 0;
4154 ctrl_buf->large_rx_ipv6 = 0;
4156 if (adapter->state != VNIC_PROBING) {
4157 old_hw_features = adapter->netdev->hw_features;
4158 adapter->netdev->hw_features = 0;
4161 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4163 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4164 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4166 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4167 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4169 if ((adapter->netdev->features &
4170 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4171 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4173 if (buf->large_tx_ipv4)
4174 adapter->netdev->hw_features |= NETIF_F_TSO;
4175 if (buf->large_tx_ipv6)
4176 adapter->netdev->hw_features |= NETIF_F_TSO6;
4178 if (adapter->state == VNIC_PROBING) {
4179 adapter->netdev->features |= adapter->netdev->hw_features;
4180 } else if (old_hw_features != adapter->netdev->hw_features) {
4181 netdev_features_t tmp = 0;
4183 /* disable features no longer supported */
4184 adapter->netdev->features &= adapter->netdev->hw_features;
4185 /* turn on features now supported if previously enabled */
4186 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4187 adapter->netdev->hw_features;
4188 adapter->netdev->features |=
4189 tmp & adapter->netdev->wanted_features;
4192 memset(&crq, 0, sizeof(crq));
4193 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4194 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4195 crq.control_ip_offload.len =
4196 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4197 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4198 ibmvnic_send_crq(adapter, &crq);
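/* Worked example (editor's note) for the feature fixup above: suppose
 * old_hw_features was CSUM|TSO, the new hw_features is CSUM|TSO6, and
 * wanted_features includes TSO6. features is first masked down to drop
 * TSO, tmp = (old ^ new) & new evaluates to TSO6, and TSO6 is enabled
 * because the user had previously asked for it.
 */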
4201 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4202 struct ibmvnic_adapter *adapter)
4204 struct device *dev = &adapter->vdev->dev;
4206 if (crq->get_vpd_size_rsp.rc.code) {
4207 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4208 crq->get_vpd_size_rsp.rc.code);
4209 complete(&adapter->fw_done);
4213 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4214 complete(&adapter->fw_done);
4217 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4218 struct ibmvnic_adapter *adapter)
4220 struct device *dev = &adapter->vdev->dev;
4221 unsigned char *substr = NULL;
4222 u8 fw_level_len = 0;
4224 memset(adapter->fw_version, 0, 32);
4226 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4229 if (crq->get_vpd_rsp.rc.code) {
4230 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4231 crq->get_vpd_rsp.rc.code);
4235 /* get the position of the firmware version info
4236 * located after the ASCII 'RM' substring in the buffer
4238 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4240 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4244 /* get length of firmware level ASCII substring */
4245 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4246 fw_level_len = *(substr + 2);
4248 dev_info(dev, "FW level length field extends past the end of the VPD buffer\n");
4252 /* copy firmware version string from vpd into adapter */
4253 if ((substr + 3 + fw_level_len) <
4254 (adapter->vpd->buff + adapter->vpd->len)) {
4255 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4257 dev_info(dev, "FW level string extends past the end of the VPD buffer\n");
4261 if (adapter->fw_version[0] == '\0')
4262 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4263 complete(&adapter->fw_done);
4266 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4268 struct device *dev = &adapter->vdev->dev;
4269 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4272 dma_unmap_single(dev, adapter->ip_offload_tok,
4273 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4275 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4276 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4277 netdev_dbg(adapter->netdev, "%016lx\n",
4278 ((unsigned long *)(buf))[i]);
4280 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4281 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4282 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4283 buf->tcp_ipv4_chksum);
4284 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4285 buf->tcp_ipv6_chksum);
4286 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4287 buf->udp_ipv4_chksum);
4288 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4289 buf->udp_ipv6_chksum);
4290 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4291 buf->large_tx_ipv4);
4292 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4293 buf->large_tx_ipv6);
4294 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4295 buf->large_rx_ipv4);
4296 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4297 buf->large_rx_ipv6);
4298 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4299 buf->max_ipv4_header_size);
4300 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4301 buf->max_ipv6_header_size);
4302 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4303 buf->max_tcp_header_size);
4304 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4305 buf->max_udp_header_size);
4306 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4307 buf->max_large_tx_size);
4308 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4309 buf->max_large_rx_size);
4310 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4311 buf->ipv6_extension_header);
4312 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4313 buf->tcp_pseudosum_req);
4314 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4315 buf->num_ipv6_ext_headers);
4316 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4317 buf->off_ipv6_ext_headers);
4319 send_control_ip_offload(adapter);
4322 static const char *ibmvnic_fw_err_cause(u16 cause)
4325 case ADAPTER_PROBLEM:
4326 return "adapter problem";
4328 return "bus problem";
4330 return "firmware problem";
4332 return "device driver problem";
4334 return "EEH recovery";
4336 return "firmware updated";
4338 return "low Memory";
4344 static void handle_error_indication(union ibmvnic_crq *crq,
4345 struct ibmvnic_adapter *adapter)
4347 struct device *dev = &adapter->vdev->dev;
4350 cause = be16_to_cpu(crq->error_indication.error_cause);
4352 dev_warn_ratelimited(dev,
4353 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4354 crq->error_indication.flags
4355 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4356 ibmvnic_fw_err_cause(cause));
4358 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4359 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4361 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4364 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4365 struct ibmvnic_adapter *adapter)
4367 struct net_device *netdev = adapter->netdev;
4368 struct device *dev = &adapter->vdev->dev;
4371 rc = crq->change_mac_addr_rsp.rc.code;
4373 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4376 /* crq->change_mac_addr.mac_addr is the requested one
4377 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4379 ether_addr_copy(netdev->dev_addr,
4380 &crq->change_mac_addr_rsp.mac_addr[0]);
4381 ether_addr_copy(adapter->mac_addr,
4382 &crq->change_mac_addr_rsp.mac_addr[0]);
4384 complete(&adapter->fw_done);
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_query_ip_offload(adapter);
	}
}

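/* Process the login response: unmap the login buffers, sanity-check the
 * reply against what we requested, cache the rx buffer size and the
 * tx/rx sub-CRQ handles, and wake the thread waiting on init_done.
 */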
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

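/* Record one QUERY_CAPABILITY response in the adapter structure. When
 * the last outstanding query completes we send our capability requests.
 */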
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}

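/* Ask the server for the physical port parameters (speed/duplex) and
 * wait up to 10 seconds for the response, serialized by fw_lock.
 */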
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

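/* Dispatch a single CRQ message: initialization handshakes, transport
 * events (migration/failover/partner failure), and command responses.
 */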
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				dev_err(dev,
					"Error %ld scheduling failover reset\n",
					rc);
				adapter->failover_pending = false;
			}
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

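/* The CRQ interrupt only schedules the tasklet; all message processing
 * happens in ibmvnic_tasklet() below with the queue lock held.
 */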
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

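/* H_ENABLE_CRQ may return busy codes while the hypervisor finishes a
 * prior operation, so retry until it either succeeds or fails hard.
 */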
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

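/* Allocate and register the CRQ page, set up the tasklet, and request
 * the CRQ interrupt. On failure, unwind in reverse order via the
 * labels at the bottom.
 */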
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

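/* Drive the CRQ init handshake and (re)build the sub-CRQs. On a reset
 * that changed the requested queue counts, the sub-CRQs are released
 * and reallocated; otherwise they are simply reset in place.
 */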
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset)
		reinit_completion(&adapter->init_done);

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

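/* Probe: read the MAC address from the device tree, allocate the
 * netdev, drive the CRQ handshake (retrying while the server returns
 * EAGAIN), and register with the network stack.
 */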
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

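/* Sysfs hook (see dev_attr_failover below): writing "1" to the
 * "failover" attribute asks the hypervisor to fail the backing device
 * over to its standby, e.g. (path shown for illustration only; the
 * exact parent directory depends on the platform's vio bus layout):
 *   echo 1 > /sys/devices/vio/<unit-address>/failover
 */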
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

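/* Estimate the IO entitlement this adapter wants from the vio bus: one
 * page for the CRQ, the statistics buffer, four pages per sub-CRQ, and
 * the long term mapped rx buffers.
 */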
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

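/* PM resume: the long term mapped buffers survive suspend, so there is
 * nothing to rebuild; just kick the tasklet in case CRQ messages
 * arrived while the device was suspended.
 */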
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);