/**************************************************************************
 * IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but
 * are used by the driver to notify the server that a packet is
 * ready for transmission or that a buffer has been added to receive a
 * packet. Subsequently, sCRQs are used by the server to notify the
 * driver that a packet transmission has been completed or that a packet
 * has been received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This entails that large, continuous DMA mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 **************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

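/* Allocate one long term buffer: carve out a DMA-coherent region, assign
 * it the adapter's next map id, and register it with the VNIC server via
 * a REQUEST_MAP command, waiting on fw_done for the server's response.
 */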
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

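/* Post receive buffers to the VNIC server. Every free slot in the pool
 * gets a fresh skb, a slice of the pool's long term buffer, and an
 * rx_add sub-CRQ descriptor. On an hcall failure the slot is rolled
 * back, and the pools are deactivated if the queue closed underneath us.
 */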
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

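/* Re-initialize the rx pools after a reset. If the server changed the
 * buffer size in the new login response, reallocate the long term
 * buffer; otherwise just re-register the existing one and clear the
 * per-buffer bookkeeping.
 */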
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 *size_array;
	int rx_scrqs;
	int i, j, rc;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

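/* Allocate one rx pool per rx-add sub-CRQ, sized from the login
 * response, along with the free map and long term buffer that back it.
 */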
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

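/* Allocate the tx pools: one regular pool and one TSO pool per tx
 * sub-CRQ. Regular buffers are sized to the requested MTU plus VLAN
 * header; TSO buffers use the fixed TSO pool geometry.
 */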
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

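/* Log in to the VNIC server. On a partial success response the server
 * has renegotiated capabilities, so release and rebuild the sub-CRQs
 * and retry, giving up after a bounded number of attempts.
 */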
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int rc;

	do {
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (adapter->init_done_rc == PARTIALSUCCESS);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

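/* Send a LOGICAL_LINK_STATE command and wait for the response,
 * re-sending after a short delay for as long as firmware reports
 * partial success.
 */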
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

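/* Allocate everything needed for an open: real queue counts, VPD,
 * napi instances, the memory map query, and the rx/tx pools.
 */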
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and the indirect array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

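/* Transmit path. The skb (head and frags) is copied into a slot of the
 * queue's long term buffer, a v1 TX descriptor is built pointing at
 * that slot, and the descriptor is handed to the server with either a
 * direct or, when header descriptors are required, an indirect sub-CRQ.
 */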
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array*/
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return adapter->fw_done_rc ? -EIO : 0;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	if (adapter->state == VNIC_PROBED) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	rc = __ibmvnic_set_mac(netdev, addr);

	return rc;
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	ibmvnic_cleanup(netdev);

	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->wait_for_reset) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (!rc)
				rc = vio_enable_interrupts(adapter->vdev);
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Couldn't initialize crq. rc=%d\n", rc);
			return rc;
		}

		rc = ibmvnic_reset_init(adapter);
		if (rc)
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = reset_state;
			return rc;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues) {
			adapter->map_id = 1;
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			rc = init_rx_pools(netdev);
			if (rc)
				return rc;
			rc = init_tx_pools(netdev);
			if (rc)
				return rc;

			release_napi(adapter);
			rc = init_napi(adapter);
			if (rc)
				return rc;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;

			rc = reset_rx_pools(adapter);
			if (rc)
				return rc;
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
		netdev_notify_peers(netdev);

	netif_carrier_on(netdev);

	return 0;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_init(adapter);
	if (rc)
		return rc;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}
	/* netif_set_real_num_xx_queues needs to take rtnl lock here
	 * unless wait_for_reset is set, in which case the rtnl lock
	 * has already been taken before initializing the reset
	 */
	if (!adapter->wait_for_reset) {
		rtnl_lock();
		rc = init_resources(adapter);
		rtnl_unlock();
	} else {
		rc = init_resources(adapter);
	}
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		if (adapter->force_reset_recovery) {
			adapter->force_reset_recovery = false;
			rc = do_hard_reset(adapter, rwi, reset_state);
		} else {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc && rc != IBMVNIC_INIT_FAILED &&
		    !adapter->force_reset_recovery)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	int ret;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			mutex_unlock(&adapter->rwi_lock);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);
	adapter->resetting = true;
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	if (adapter->wait_for_reset)
		adapter->wait_for_reset = false;
	return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

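/* NAPI poll: drain rx completions for this sub-CRQ, copy each frame
 * from the long term buffer into its skb, re-insert stripped VLAN tags,
 * honor checksum-good flags, then replenish the pool and re-arm the
 * interrupt when we finish under budget.
 */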
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(adapter->resetting &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

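/* Record the current settings as a fallback, schedule a CHANGE_PARAM
 * reset to apply the desired ones, and if that reset fails fall back
 * to the recorded values with a second reset.
 */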
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	init_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	if (rc)
		return rc;
	wait_for_completion(&adapter->reset_done);

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		init_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc)
			return ret;
		wait_for_completion(&adapter->reset_done);
	}
	adapter->wait_for_reset = false;

	return ret;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters can not
	 * handle packets with a MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

2287 static const struct net_device_ops ibmvnic_netdev_ops = {
2288 .ndo_open = ibmvnic_open,
2289 .ndo_stop = ibmvnic_close,
2290 .ndo_start_xmit = ibmvnic_xmit,
2291 .ndo_set_rx_mode = ibmvnic_set_multi,
2292 .ndo_set_mac_address = ibmvnic_set_mac,
2293 .ndo_validate_addr = eth_validate_addr,
2294 .ndo_tx_timeout = ibmvnic_tx_timeout,
2295 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
	.ndo_change_mtu = ibmvnic_change_mtu,
	.ndo_features_check = ibmvnic_features_check,
};
2302 /* ethtool functions */
2304 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
2313 cmd->base.speed = SPEED_1000;
2314 cmd->base.duplex = DUPLEX_FULL;
2315 cmd->base.port = PORT_FIBRE;
2316 cmd->base.phy_address = 0;
2317 cmd->base.autoneg = AUTONEG_ENABLE;
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
2327 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2328 struct ethtool_drvinfo *info)
2330 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2332 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2333 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2334 strlcpy(info->fw_version, adapter->fw_version,
2335 sizeof(info->fw_version));
2338 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2340 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2342 return adapter->msg_enable;
2345 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2347 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2349 adapter->msg_enable = data;
2352 static u32 ibmvnic_get_link(struct net_device *netdev)
2354 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
2362 static void ibmvnic_get_ringparam(struct net_device *netdev,
2363 struct ethtool_ringparam *ring)
2365 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2367 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2368 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2369 ring->rx_mini_max_pending = 0;
2370 ring->rx_jumbo_max_pending = 0;
2371 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2372 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2373 ring->rx_mini_pending = 0;
2374 ring->rx_jumbo_pending = 0;
2377 static int ibmvnic_set_ringparam(struct net_device *netdev,
2378 struct ethtool_ringparam *ring)
2380 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2382 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2383 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

2392 adapter->desired.rx_entries = ring->rx_pending;
2393 adapter->desired.tx_entries = ring->tx_pending;
2395 return wait_for_reset(adapter);
2398 static void ibmvnic_get_channels(struct net_device *netdev,
2399 struct ethtool_channels *channels)
2401 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2403 channels->max_rx = adapter->max_rx_queues;
2404 channels->max_tx = adapter->max_tx_queues;
2405 channels->max_other = 0;
2406 channels->max_combined = 0;
2407 channels->rx_count = adapter->req_rx_queues;
2408 channels->tx_count = adapter->req_tx_queues;
2409 channels->other_count = 0;
2410 channels->combined_count = 0;
2413 static int ibmvnic_set_channels(struct net_device *netdev,
2414 struct ethtool_channels *channels)
2416 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2418 adapter->desired.rx_queues = channels->rx_count;
2419 adapter->desired.tx_queues = channels->tx_count;
2421 return wait_for_reset(adapter);
2424 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

2432 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2433 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2435 for (i = 0; i < adapter->req_tx_queues; i++) {
2436 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2437 data += ETH_GSTRING_LEN;
2439 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2440 data += ETH_GSTRING_LEN;
2442 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2443 data += ETH_GSTRING_LEN;
2446 for (i = 0; i < adapter->req_rx_queues; i++) {
2447 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2448 data += ETH_GSTRING_LEN;
2450 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2451 data += ETH_GSTRING_LEN;
2453 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2454 data += ETH_GSTRING_LEN;
2458 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(ibmvnic_stats) +
	       adapter->req_tx_queues * NUM_TX_STATS +
	       adapter->req_rx_queues * NUM_RX_STATS;
}
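
/* Device-side counters are DMA'd into the stats buffer by the firmware
 * in response to a REQUEST_STATISTICS CRQ; the driver blocks on the
 * stats_done completion and then appends its own per-queue counters.
 */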
2472 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2473 struct ethtool_stats *stats, u64 *data)
2475 struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

2480 memset(&crq, 0, sizeof(crq));
2481 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2482 crq.request_statistics.cmd = REQUEST_STATISTICS;
2483 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2484 crq.request_statistics.len =
2485 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2487 /* Wait for data to be written */
2488 init_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	wait_for_completion(&adapter->stats_done);
2494 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2495 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2496 ibmvnic_stats[i].offset));
	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i++] = adapter->tx_stats_buffers[j].packets;
		data[i++] = adapter->tx_stats_buffers[j].bytes;
		data[i++] = adapter->tx_stats_buffers[j].dropped_packets;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i++] = adapter->rx_stats_buffers[j].packets;
		data[i++] = adapter->rx_stats_buffers[j].bytes;
		data[i++] = adapter->rx_stats_buffers[j].interrupts;
	}
}
2517 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2518 .get_drvinfo = ibmvnic_get_drvinfo,
2519 .get_msglevel = ibmvnic_get_msglevel,
2520 .set_msglevel = ibmvnic_set_msglevel,
2521 .get_link = ibmvnic_get_link,
2522 .get_ringparam = ibmvnic_get_ringparam,
2523 .set_ringparam = ibmvnic_set_ringparam,
2524 .get_channels = ibmvnic_get_channels,
2525 .set_channels = ibmvnic_set_channels,
2526 .get_strings = ibmvnic_get_strings,
2527 .get_sset_count = ibmvnic_get_sset_count,
2528 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings = ibmvnic_get_link_ksettings,
};
2532 /* Routines for managing CRQs/sCRQs */
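
/* Resetting a sub-CRQ releases its interrupt, zeroes the 4-page message
 * area, and re-registers the queue with the hypervisor.  The long term
 * DMA mapping of the message pages is kept across the reset.
 */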
static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	atomic_set(&scrq->used, 0);
	scrq->cur = 0;
	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

2558 for (i = 0; i < adapter->req_tx_queues; i++) {
2559 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

2565 for (i = 0; i < adapter->req_rx_queues; i++) {
2566 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}
2575 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

2582 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
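
/* Allocate one sub-CRQ: four zeroed pages are DMA-mapped and registered
 * with the hypervisor via H_REG_SUB_CRQ.  H_RESOURCE indicates a stale
 * registration from a previous incarnation, so the main CRQ is reset
 * before giving up.
 */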
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
2608 struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
2625 if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}
2630 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2631 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2633 if (rc == H_RESOURCE)
2634 rc = ibmvnic_reset_crq(adapter);
2636 if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}
2643 scrq->adapter = adapter;
2644 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2645 spin_lock_init(&scrq->lock);
2647 netdev_dbg(adapter->netdev,
2648 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
2664 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2668 if (adapter->tx_scrq) {
2669 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
2675 if (adapter->tx_scrq[i]->irq) {
2676 free_irq(adapter->tx_scrq[i]->irq,
2677 adapter->tx_scrq[i]);
2678 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2679 adapter->tx_scrq[i]->irq = 0;
			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}
2686 kfree(adapter->tx_scrq);
2687 adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

2691 if (adapter->rx_scrq) {
2692 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
2698 if (adapter->rx_scrq[i]->irq) {
2699 free_irq(adapter->rx_scrq[i]->irq,
2700 adapter->rx_scrq[i]);
2701 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2702 adapter->rx_scrq[i]->irq = 0;
			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}
2709 kfree(adapter->rx_scrq);
2710 adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}
2715 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

2721 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2722 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);

	return rc;
}
2729 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

2735 if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

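	/* A transport event such as a partition migration can leave an
	 * interrupt pending for this source; it must be acknowledged with
	 * H_EOI before the source can be re-enabled.
	 */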
2740 if (adapter->resetting &&
2741 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2742 u64 val = (0xff000000) | scrq->hw_irq;
2744 rc = plpar_hcall_norets(H_EOI, val);
		if (rc)
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

2750 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2751 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);

	return rc;
}
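
/* Reap transmit completions: release the DMA descriptors of each
 * completed correlator, return the buffer to its pool's free map, and
 * wake the matching netdev subqueue once the ring is at most half full.
 */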
2758 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2759 struct ibmvnic_sub_crq_queue *scrq)
2761 struct device *dev = &adapter->vdev->dev;
2762 struct ibmvnic_tx_pool *tx_pool;
2763 struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 *first;

restart_loop:
2770 while (pending_scrq(adapter, scrq)) {
2771 unsigned int pool = scrq->pool_index;
2772 int num_entries = 0;
2774 next = ibmvnic_next_scrq(adapter, scrq);
2775 for (i = 0; i < next->tx_comp.num_comps; i++) {
2776 if (next->tx_comp.rcs[i]) {
2777 dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
2781 index = be32_to_cpu(next->tx_comp.correlators[i]);
2782 if (index & IBMVNIC_TSO_POOL_MASK) {
2783 tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}
2789 txbuff = &tx_pool->tx_buff[index];
2791 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
2797 /* if sub_crq was sent indirectly */
2798 first = &txbuff->indir_arr[0].generic.first;
2799 if (*first == IBMVNIC_CRQ_CMD) {
2800 dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
				*first = 0;
			}
2806 if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

2811 num_entries += txbuff->num_entries;
2813 tx_pool->free_map[tx_pool->producer_index] = index;
2814 tx_pool->producer_index =
2815 (tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}

		/* remove tx_comp scrq */
2819 next->tx_comp.first = 0;
2821 if (atomic_sub_return(num_entries, &scrq->used) <=
2822 (adapter->req_tx_entries_per_subcrq / 2) &&
2823 __netif_subqueue_stopped(adapter->netdev,
2824 scrq->pool_index)) {
2825 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

2831 enable_scrq_irq(adapter, scrq);
2833 if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
2844 struct ibmvnic_adapter *adapter = scrq->adapter;
2846 disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
2855 struct ibmvnic_adapter *adapter = scrq->adapter;
	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;
2863 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2865 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2866 disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

2880 for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
2883 scrq = adapter->tx_scrq[i];
2884 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}
2892 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2893 0, "ibmvnic_tx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
2898 irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

2903 for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
2906 scrq = adapter->rx_scrq[i];
2907 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
2913 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2914 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
2918 irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	return rc;

req_rx_irq_failed:
2925 for (j = 0; j < i; j++) {
2926 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
2931 for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}
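
/* Allocate the requested tx and rx sub-CRQs in a single array, then
 * split it into the per-direction lists.  If fewer queues register than
 * requested, the shortfall is taken alternately from the rx and tx
 * requests without dropping below the server-advertised minimums.
 */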
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

2948 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;
2954 for (i = 0; i < total_queues; i++) {
2955 allqueues[i] = init_sub_crq_queue(adapter);
2956 if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}
2963 /* Make sure we were able to register the minimum number of queues */
2964 if (registered_queues <
2965 adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}
	/* Distribute the shortfall between the rx and tx queue requests */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
2972 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		if (i % 2 == 1) {
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
		} else {
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
		}
	}

2989 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2990 sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;
2994 for (i = 0; i < adapter->req_tx_queues; i++) {
2995 adapter->tx_scrq[i] = allqueues[i];
2996 adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

3000 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3001 sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;
3005 for (i = 0; i < adapter->req_rx_queues; i++) {
3006 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3007 adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;
3031 /* Sub-CRQ entries are 32 byte long */
3032 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3034 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3035 adapter->min_rx_add_entries_per_subcrq > entries_page) {
		dev_err(dev, "Fatal, invalid entries per sub-crq\n");
		return;
	}
3040 if (adapter->desired.mtu)
		adapter->req_mtu = adapter->desired.mtu;
	else
		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3045 if (!adapter->desired.tx_entries)
3046 adapter->desired.tx_entries =
3047 adapter->max_tx_entries_per_subcrq;
3048 if (!adapter->desired.rx_entries)
3049 adapter->desired.rx_entries =
3050 adapter->max_rx_add_entries_per_subcrq;
3052 max_entries = IBMVNIC_MAX_LTB_SIZE /
3053 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3055 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3056 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
		adapter->desired.tx_entries = max_entries;
	}
3060 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3061 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
		adapter->desired.rx_entries = max_entries;
	}
3065 if (adapter->desired.tx_entries)
3066 adapter->req_tx_entries_per_subcrq =
3067 adapter->desired.tx_entries;
	else
		adapter->req_tx_entries_per_subcrq =
3070 adapter->max_tx_entries_per_subcrq;
3072 if (adapter->desired.rx_entries)
3073 adapter->req_rx_add_entries_per_subcrq =
3074 adapter->desired.rx_entries;
	else
		adapter->req_rx_add_entries_per_subcrq =
3077 adapter->max_rx_add_entries_per_subcrq;
3079 if (adapter->desired.tx_queues)
3080 adapter->req_tx_queues =
3081 adapter->desired.tx_queues;
	else
		adapter->req_tx_queues =
3084 adapter->opt_tx_comp_sub_queues;
3086 if (adapter->desired.rx_queues)
3087 adapter->req_rx_queues =
3088 adapter->desired.rx_queues;
	else
		adapter->req_rx_queues =
3091 adapter->opt_rx_comp_queues;
3093 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3096 memset(&crq, 0, sizeof(crq));
3097 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3098 crq.request_capability.cmd = REQUEST_CAPABILITY;
3100 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3101 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3102 atomic_inc(&adapter->running_cap_crqs);
3103 ibmvnic_send_crq(adapter, &crq);
3105 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3106 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3107 atomic_inc(&adapter->running_cap_crqs);
3108 ibmvnic_send_crq(adapter, &crq);
3110 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3111 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3112 atomic_inc(&adapter->running_cap_crqs);
3113 ibmvnic_send_crq(adapter, &crq);
3115 crq.request_capability.capability =
3116 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3117 crq.request_capability.number =
3118 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3119 atomic_inc(&adapter->running_cap_crqs);
3120 ibmvnic_send_crq(adapter, &crq);
3122 crq.request_capability.capability =
3123 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3124 crq.request_capability.number =
3125 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3126 atomic_inc(&adapter->running_cap_crqs);
3127 ibmvnic_send_crq(adapter, &crq);
3129 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3130 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3131 atomic_inc(&adapter->running_cap_crqs);
3132 ibmvnic_send_crq(adapter, &crq);
3134 if (adapter->netdev->flags & IFF_PROMISC) {
3135 if (adapter->promisc_supported) {
3136 crq.request_capability.capability =
3137 cpu_to_be16(PROMISC_REQUESTED);
3138 crq.request_capability.number = cpu_to_be64(1);
3139 atomic_inc(&adapter->running_cap_crqs);
3140 ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
3144 cpu_to_be16(PROMISC_REQUESTED);
3145 crq.request_capability.number = cpu_to_be64(0);
3146 atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
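
/* A sub-CRQ entry is valid once the producer sets IBMVNIC_CRQ_CMD_RSP
 * in generic.first.  pending_scrq() peeks at the current slot, while
 * ibmvnic_next_scrq() consumes it under the queue lock, wrapping the
 * cursor at the end of the ring.
 */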
3151 static int pending_scrq(struct ibmvnic_adapter *adapter,
3152 struct ibmvnic_sub_crq_queue *scrq)
3154 union sub_crq *entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}
3162 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3163 struct ibmvnic_sub_crq_queue *scrq)
3165 union sub_crq *entry;
3166 unsigned long flags;
3168 spin_lock_irqsave(&scrq->lock, flags);
3169 entry = &scrq->msgs[scrq->cur];
3170 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
3181 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3183 struct ibmvnic_crq_queue *queue = &adapter->crq;
3184 union ibmvnic_crq *crq;
3186 crq = &queue->msgs[queue->cur];
3187 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}
3216 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3217 union sub_crq *sub_crq)
3219 unsigned int ua = adapter->vdev->unit_address;
3220 struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

3224 netdev_dbg(adapter->netdev,
3225 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3226 (unsigned long int)cpu_to_be64(remote_handle),
3227 (unsigned long int)cpu_to_be64(u64_crq[0]),
3228 (unsigned long int)cpu_to_be64(u64_crq[1]),
3229 (unsigned long int)cpu_to_be64(u64_crq[2]),
3230 (unsigned long int)cpu_to_be64(u64_crq[3]));
	/* Make sure the hypervisor sees the complete request */
	mb();
3235 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3236 cpu_to_be64(remote_handle),
3237 cpu_to_be64(u64_crq[0]),
3238 cpu_to_be64(u64_crq[1]),
3239 cpu_to_be64(u64_crq[2]),
3240 cpu_to_be64(u64_crq[3]));
	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
3248 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3249 u64 remote_handle, u64 ioba, u64 num_entries)
3251 unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
3257 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
3267 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3268 union ibmvnic_crq *crq)
3270 unsigned int ua = adapter->vdev->unit_address;
3271 struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

3275 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3276 (unsigned long int)cpu_to_be64(u64_crq[0]),
3277 (unsigned long int)cpu_to_be64(u64_crq[1]));
3279 if (!adapter->crq.active &&
3280 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();
3288 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3289 cpu_to_be64(u64_crq[0]),
3290 cpu_to_be64(u64_crq[1]));
	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (adapter->resetting)
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
3305 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3307 union ibmvnic_crq crq;
3309 memset(&crq, 0, sizeof(crq));
3310 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3311 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3312 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
	return ibmvnic_send_crq(adapter, &crq);
}
3317 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3319 union ibmvnic_crq crq;
3321 memset(&crq, 0, sizeof(crq));
3322 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3323 crq.version_exchange.cmd = VERSION_EXCHANGE;
3324 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
	return ibmvnic_send_crq(adapter, &crq);
}
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
3340 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
3343 len = 4 * sizeof(struct vnic_login_client_data);
3344 len += 6; /* "Linux" plus NULL */
3345 len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}
3351 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3352 struct vnic_login_client_data *vlcd)
3354 const char *os_name = "Linux";
	/* Type 1 - LPAR OS */
	vlcd->type = 1;
3359 len = strlen(os_name) + 1;
3360 vlcd->len = cpu_to_be16(len);
3361 strncpy(vlcd->name, os_name, len);
3362 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
	/* Type 2 - LPAR name */
	vlcd->type = 2;
3366 len = strlen(utsname()->nodename) + 1;
3367 vlcd->len = cpu_to_be16(len);
3368 strncpy(vlcd->name, utsname()->nodename, len);
3369 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
	/* Type 3 - device name */
	vlcd->type = 3;
3373 len = strlen(adapter->netdev->name) + 1;
3374 vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}
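
/* The login buffer is laid out as a fixed header, an array of tx
 * sub-CRQ handles, an array of rx sub-CRQ handles, and the client data
 * entries built above; the header records the offset and length of each
 * part so the server can parse it.
 */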
3378 static int send_login(struct ibmvnic_adapter *adapter)
3380 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3381 struct ibmvnic_login_buffer *login_buffer;
3382 struct device *dev = &adapter->vdev->dev;
3383 dma_addr_t rsp_buffer_token;
3384 dma_addr_t buffer_token;
3385 size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

3394 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3395 netdev_err(adapter->netdev,
3396 "RX or TX queues are not allocated, device login failed\n");
3400 release_login_rsp_buffer(adapter);
3401 client_data_len = vnic_client_data_len(adapter);
	buffer_size =
		sizeof(struct ibmvnic_login_buffer) +
		sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
		client_data_len;

3408 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
3414 if (dma_mapping_error(dev, buffer_token)) {
3415 dev_err(dev, "Couldn't map login buffer\n");
3416 goto buf_map_failed;
3419 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3420 sizeof(u64) * adapter->req_tx_queues +
3421 sizeof(u64) * adapter->req_rx_queues +
3422 sizeof(u64) * adapter->req_rx_queues +
3423 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3425 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3426 if (!login_rsp_buffer)
3427 goto buf_rsp_alloc_failed;
3429 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3430 rsp_buffer_size, DMA_FROM_DEVICE);
3431 if (dma_mapping_error(dev, rsp_buffer_token)) {
3432 dev_err(dev, "Couldn't map login rsp buffer\n");
3433 goto buf_rsp_map_failed;
3436 adapter->login_buf = login_buffer;
3437 adapter->login_buf_token = buffer_token;
3438 adapter->login_buf_sz = buffer_size;
3439 adapter->login_rsp_buf = login_rsp_buffer;
3440 adapter->login_rsp_buf_token = rsp_buffer_token;
3441 adapter->login_rsp_buf_sz = rsp_buffer_size;
3443 login_buffer->len = cpu_to_be32(buffer_size);
3444 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3445 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3446 login_buffer->off_txcomp_subcrqs =
3447 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3448 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3449 login_buffer->off_rxcomp_subcrqs =
3450 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3451 sizeof(u64) * adapter->req_tx_queues);
3452 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3453 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3455 tx_list_p = (__be64 *)((char *)login_buffer +
3456 sizeof(struct ibmvnic_login_buffer));
3457 rx_list_p = (__be64 *)((char *)login_buffer +
3458 sizeof(struct ibmvnic_login_buffer) +
3459 sizeof(u64) * adapter->req_tx_queues);
3461 for (i = 0; i < adapter->req_tx_queues; i++) {
3462 if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

3468 for (i = 0; i < adapter->req_rx_queues; i++) {
3469 if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

3475 /* Insert vNIC login client data */
3476 vlcd = (struct vnic_login_client_data *)
3477 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3478 login_buffer->client_data_offset =
3479 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3480 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3482 vnic_add_client_data(adapter, vlcd);
3484 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3485 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3486 netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

3490 memset(&crq, 0, sizeof(crq));
3491 crq.login.first = IBMVNIC_CRQ_CMD;
3492 crq.login.cmd = LOGIN;
3493 crq.login.ioba = cpu_to_be32(buffer_token);
3494 crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

3514 memset(&crq, 0, sizeof(crq));
3515 crq.request_map.first = IBMVNIC_CRQ_CMD;
3516 crq.request_map.cmd = REQUEST_MAP;
3517 crq.request_map.map_id = map_id;
3518 crq.request_map.ioba = cpu_to_be32(addr);
3519 crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}
3523 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3525 union ibmvnic_crq crq;
3527 memset(&crq, 0, sizeof(crq));
3528 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3529 crq.request_unmap.cmd = REQUEST_UNMAP;
3530 crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}
3534 static void send_map_query(struct ibmvnic_adapter *adapter)
3536 union ibmvnic_crq crq;
3538 memset(&crq, 0, sizeof(crq));
3539 crq.query_map.first = IBMVNIC_CRQ_CMD;
3540 crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
3544 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3545 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3547 union ibmvnic_crq crq;
3549 atomic_set(&adapter->running_cap_crqs, 0);
3550 memset(&crq, 0, sizeof(crq));
3551 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3552 crq.query_capability.cmd = QUERY_CAPABILITY;
3554 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3555 atomic_inc(&adapter->running_cap_crqs);
3556 ibmvnic_send_crq(adapter, &crq);
3558 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3559 atomic_inc(&adapter->running_cap_crqs);
3560 ibmvnic_send_crq(adapter, &crq);
3562 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3563 atomic_inc(&adapter->running_cap_crqs);
3564 ibmvnic_send_crq(adapter, &crq);
3566 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3567 atomic_inc(&adapter->running_cap_crqs);
3568 ibmvnic_send_crq(adapter, &crq);
3570 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3571 atomic_inc(&adapter->running_cap_crqs);
3572 ibmvnic_send_crq(adapter, &crq);
3574 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3575 atomic_inc(&adapter->running_cap_crqs);
3576 ibmvnic_send_crq(adapter, &crq);
3578 crq.query_capability.capability =
3579 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3580 atomic_inc(&adapter->running_cap_crqs);
3581 ibmvnic_send_crq(adapter, &crq);
3583 crq.query_capability.capability =
3584 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3585 atomic_inc(&adapter->running_cap_crqs);
3586 ibmvnic_send_crq(adapter, &crq);
3588 crq.query_capability.capability =
3589 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3590 atomic_inc(&adapter->running_cap_crqs);
3591 ibmvnic_send_crq(adapter, &crq);
3593 crq.query_capability.capability =
3594 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3595 atomic_inc(&adapter->running_cap_crqs);
3596 ibmvnic_send_crq(adapter, &crq);
3598 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3599 atomic_inc(&adapter->running_cap_crqs);
3600 ibmvnic_send_crq(adapter, &crq);
3602 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3603 atomic_inc(&adapter->running_cap_crqs);
3604 ibmvnic_send_crq(adapter, &crq);
3606 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3607 atomic_inc(&adapter->running_cap_crqs);
3608 ibmvnic_send_crq(adapter, &crq);
3610 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3611 atomic_inc(&adapter->running_cap_crqs);
3612 ibmvnic_send_crq(adapter, &crq);
3614 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3615 atomic_inc(&adapter->running_cap_crqs);
3616 ibmvnic_send_crq(adapter, &crq);
3618 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3619 atomic_inc(&adapter->running_cap_crqs);
3620 ibmvnic_send_crq(adapter, &crq);
3622 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3623 atomic_inc(&adapter->running_cap_crqs);
3624 ibmvnic_send_crq(adapter, &crq);
3626 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3627 atomic_inc(&adapter->running_cap_crqs);
3628 ibmvnic_send_crq(adapter, &crq);
3630 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3631 atomic_inc(&adapter->running_cap_crqs);
3632 ibmvnic_send_crq(adapter, &crq);
3634 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3635 atomic_inc(&adapter->running_cap_crqs);
3636 ibmvnic_send_crq(adapter, &crq);
3638 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3639 atomic_inc(&adapter->running_cap_crqs);
3640 ibmvnic_send_crq(adapter, &crq);
3642 crq.query_capability.capability =
3643 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3644 atomic_inc(&adapter->running_cap_crqs);
3645 ibmvnic_send_crq(adapter, &crq);
3647 crq.query_capability.capability =
3648 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3649 atomic_inc(&adapter->running_cap_crqs);
3650 ibmvnic_send_crq(adapter, &crq);
3652 crq.query_capability.capability =
3653 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3654 atomic_inc(&adapter->running_cap_crqs);
3655 ibmvnic_send_crq(adapter, &crq);
3657 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3658 atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
3662 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3663 struct ibmvnic_adapter *adapter)
3665 struct device *dev = &adapter->vdev->dev;
3667 if (crq->get_vpd_size_rsp.rc.code) {
3668 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3669 crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

3674 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}
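
/* Locate the firmware level in the VPD buffer: the ASCII keyword "RM"
 * is followed by a one-byte length and then the firmware level string.
 * Falls back to "N/A" if the keyword is missing or truncated.
 */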
3678 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3679 struct ibmvnic_adapter *adapter)
3681 struct device *dev = &adapter->vdev->dev;
3682 unsigned char *substr = NULL;
3683 u8 fw_level_len = 0;
3685 memset(adapter->fw_version, 0, 32);
	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

3690 if (crq->get_vpd_rsp.rc.code) {
3691 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

3696 /* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}
3705 /* get length of firmware level ASCII substring */
3706 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

3713 /* copy firmware version string from vpd into adapter */
3714 if ((substr + 3 + fw_level_len) <
3715 (adapter->vpd->buff + adapter->vpd->len)) {
3716 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
3722 if (adapter->fw_version[0] == '\0')
3723 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
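
/* The QUERY_IP_OFFLOAD response describes the checksum and large-send
 * offloads the backing device supports.  Translate them into netdev
 * feature flags and echo the selection back in a CONTROL_IP_OFFLOAD
 * request.
 */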
3727 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3729 struct device *dev = &adapter->vdev->dev;
3730 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

3734 dma_unmap_single(dev, adapter->ip_offload_tok,
3735 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3737 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3738 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3739 netdev_dbg(adapter->netdev, "%016lx\n",
3740 ((unsigned long int *)(buf))[i]);
3742 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3743 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3744 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3745 buf->tcp_ipv4_chksum);
3746 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3747 buf->tcp_ipv6_chksum);
3748 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3749 buf->udp_ipv4_chksum);
3750 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3751 buf->udp_ipv6_chksum);
3752 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3753 buf->large_tx_ipv4);
3754 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3755 buf->large_tx_ipv6);
3756 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3757 buf->large_rx_ipv4);
3758 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3759 buf->large_rx_ipv6);
3760 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3761 buf->max_ipv4_header_size);
3762 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3763 buf->max_ipv6_header_size);
3764 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3765 buf->max_tcp_header_size);
3766 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3767 buf->max_udp_header_size);
3768 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3769 buf->max_large_tx_size);
3770 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3771 buf->max_large_rx_size);
3772 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3773 buf->ipv6_extension_header);
3774 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3775 buf->tcp_pseudosum_req);
3776 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3777 buf->num_ipv6_ext_headers);
3778 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3779 buf->off_ipv6_ext_headers);
3781 adapter->ip_offload_ctrl_tok =
3782 dma_map_single(dev, &adapter->ip_offload_ctrl,
3783 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3785 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

3790 adapter->ip_offload_ctrl.len =
3791 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3792 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3793 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3794 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3795 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3796 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3797 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3798 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3799 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3800 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3802 /* large_rx disabled for now, additional features needed */
3803 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3804 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3806 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3808 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3809 adapter->netdev->features |= NETIF_F_IP_CSUM;
3811 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3812 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3814 if ((adapter->netdev->features &
3815 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3816 adapter->netdev->features |= NETIF_F_RXCSUM;
3818 if (buf->large_tx_ipv4)
3819 adapter->netdev->features |= NETIF_F_TSO;
3820 if (buf->large_tx_ipv6)
3821 adapter->netdev->features |= NETIF_F_TSO6;
3823 adapter->netdev->hw_features |= adapter->netdev->features;
3825 memset(&crq, 0, sizeof(crq));
3826 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3827 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3828 crq.control_ip_offload.len =
3829 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3830 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}
3856 static void handle_error_indication(union ibmvnic_crq *crq,
3857 struct ibmvnic_adapter *adapter)
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

3864 dev_warn_ratelimited(dev,
3865 "Firmware reports %serror, cause: %s. Starting recovery...\n",
3866 crq->error_indication.flags
3867 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3868 ibmvnic_fw_err_cause(cause));
3870 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3871 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
3876 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3877 struct ibmvnic_adapter *adapter)
3879 struct net_device *netdev = adapter->netdev;
3880 struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
out:
	complete(&adapter->fw_done);
	return rc;
}
3895 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3896 struct ibmvnic_adapter *adapter)
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
3903 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
3912 case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
3916 case REQ_TX_ENTRIES_PER_SUBCRQ:
3917 req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
3920 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3921 req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
3928 case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
3960 dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

3965 /* Done receiving requested capabilities, query IP offload support */
3966 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3967 union ibmvnic_crq newcrq;
3968 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3969 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3970 &adapter->ip_offload_buf;
3972 adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

3977 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3978 if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}
3983 memset(&newcrq, 0, sizeof(newcrq));
3984 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3985 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3986 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3987 newcrq.query_ip_offload.ioba =
3988 cpu_to_be32(adapter->ip_offload_tok);
		ibmvnic_send_crq(adapter, &newcrq);
	}
}
3994 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3995 struct ibmvnic_adapter *adapter)
3997 struct device *dev = &adapter->vdev->dev;
3998 struct net_device *netdev = adapter->netdev;
3999 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
4005 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4006 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4008 /* If the number of queues requested can't be allocated by the
4009 * server, the login response will return with code 1. We will need
4010 * to resend the login buffer with fewer queues requested.
4012 if (login_rsp_crq->generic.rc.code) {
4013 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

4018 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4020 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4021 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4022 netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
4027 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4028 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4029 adapter->req_rx_add_queues !=
4030 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4031 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
4035 release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
4041 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4042 struct ibmvnic_adapter *adapter)
4044 struct device *dev = &adapter->vdev->dev;
4047 rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
4052 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4053 struct ibmvnic_adapter *adapter)
4055 struct net_device *netdev = adapter->netdev;
4056 struct device *dev = &adapter->vdev->dev;
4059 rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}

4064 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4065 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
4069 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4070 struct ibmvnic_adapter *adapter)
4072 struct net_device *netdev = adapter->netdev;
4073 struct device *dev = &adapter->vdev->dev;
4076 atomic_dec(&adapter->running_cap_crqs);
4077 netdev_dbg(netdev, "Outstanding queries: %d\n",
4078 atomic_read(&adapter->running_cap_crqs));
4079 rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

4085 switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
4088 be64_to_cpu(crq->query_capability.number);
4089 netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
4094 be64_to_cpu(crq->query_capability.number);
4095 netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
4098 case MIN_RX_ADD_QUEUES:
4099 adapter->min_rx_add_queues =
4100 be64_to_cpu(crq->query_capability.number);
4101 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
4106 be64_to_cpu(crq->query_capability.number);
4107 netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
4112 be64_to_cpu(crq->query_capability.number);
4113 netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
4116 case MAX_RX_ADD_QUEUES:
4117 adapter->max_rx_add_queues =
4118 be64_to_cpu(crq->query_capability.number);
4119 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
4122 case MIN_TX_ENTRIES_PER_SUBCRQ:
4123 adapter->min_tx_entries_per_subcrq =
4124 be64_to_cpu(crq->query_capability.number);
4125 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
4128 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4129 adapter->min_rx_add_entries_per_subcrq =
4130 be64_to_cpu(crq->query_capability.number);
4131 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
4134 case MAX_TX_ENTRIES_PER_SUBCRQ:
4135 adapter->max_tx_entries_per_subcrq =
4136 be64_to_cpu(crq->query_capability.number);
4137 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
4140 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4141 adapter->max_rx_add_entries_per_subcrq =
4142 be64_to_cpu(crq->query_capability.number);
4143 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
4146 case TCP_IP_OFFLOAD:
4147 adapter->tcp_ip_offload =
4148 be64_to_cpu(crq->query_capability.number);
4149 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
4152 case PROMISC_SUPPORTED:
4153 adapter->promisc_supported =
4154 be64_to_cpu(crq->query_capability.number);
4155 netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4160 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4165 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
4168 case MAX_MULTICAST_FILTERS:
4169 adapter->max_multicast_filters =
4170 be64_to_cpu(crq->query_capability.number);
4171 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
4174 case VLAN_HEADER_INSERTION:
4175 adapter->vlan_header_insertion =
4176 be64_to_cpu(crq->query_capability.number);
4177 if (adapter->vlan_header_insertion)
4178 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4179 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
4182 case RX_VLAN_HEADER_INSERTION:
4183 adapter->rx_vlan_header_insertion =
4184 be64_to_cpu(crq->query_capability.number);
4185 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
4188 case MAX_TX_SG_ENTRIES:
4189 adapter->max_tx_sg_entries =
4190 be64_to_cpu(crq->query_capability.number);
4191 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
4194 case RX_SG_SUPPORTED:
4195 adapter->rx_sg_supported =
4196 be64_to_cpu(crq->query_capability.number);
4197 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
4200 case OPT_TX_COMP_SUB_QUEUES:
4201 adapter->opt_tx_comp_sub_queues =
4202 be64_to_cpu(crq->query_capability.number);
4203 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
4206 case OPT_RX_COMP_QUEUES:
4207 adapter->opt_rx_comp_queues =
4208 be64_to_cpu(crq->query_capability.number);
4209 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
4212 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4213 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4214 be64_to_cpu(crq->query_capability.number);
4215 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
4218 case OPT_TX_ENTRIES_PER_SUBCRQ:
4219 adapter->opt_tx_entries_per_subcrq =
4220 be64_to_cpu(crq->query_capability.number);
4221 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
4224 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4225 adapter->opt_rxba_entries_per_subcrq =
4226 be64_to_cpu(crq->query_capability.number);
4227 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
4230 case TX_RX_DESC_REQ:
4231 adapter->tx_rx_desc_req = crq->query_capability.number;
4232 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
4242 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4243 adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}
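
/* Main CRQ dispatch: the first byte of a message selects its class
 * (initialization handshake, transport event, or command response) and
 * the command byte selects the specific handler.
 */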
4248 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4249 struct ibmvnic_adapter *adapter)
4251 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4252 struct net_device *netdev = adapter->netdev;
4253 struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

4257 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4258 (unsigned long int)cpu_to_be64(u64_crq[0]),
4259 (unsigned long int)cpu_to_be64(u64_crq[1]));
4260 switch (gen_crq->first) {
4261 case IBMVNIC_CRQ_INIT_RSP:
4262 switch (gen_crq->cmd) {
4263 case IBMVNIC_CRQ_INIT:
4264 dev_info(dev, "Partner initialized\n");
4265 adapter->from_passive_init = true;
4266 adapter->failover_pending = false;
4267 if (!completion_done(&adapter->init_done)) {
4268 complete(&adapter->init_done);
4269 adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
4273 case IBMVNIC_CRQ_INIT_COMPLETE:
4274 dev_info(dev, "Partner initialization complete\n");
4275 adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
4282 case IBMVNIC_CRQ_XPORT_EVENT:
4283 netif_carrier_off(netdev);
4284 adapter->crq.active = false;
4285 if (adapter->resetting)
4286 adapter->force_reset_recovery = true;
4287 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4288 dev_info(dev, "Migrated, re-enabling adapter\n");
4289 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4290 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4291 dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}
4308 switch (gen_crq->cmd) {
4309 case VERSION_EXCHANGE_RSP:
4310 rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
4315 dev_info(dev, "Partner protocol version is %d\n",
4316 crq->version_exchange_rsp.version);
4317 if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
4321 send_cap_queries(adapter);
4323 case QUERY_CAPABILITY_RSP:
4324 handle_query_cap_rsp(crq, adapter);
4327 handle_query_map_rsp(crq, adapter);
4329 case REQUEST_MAP_RSP:
4330 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4331 complete(&adapter->fw_done);
4333 case REQUEST_UNMAP_RSP:
4334 handle_request_unmap_rsp(crq, adapter);
4336 case REQUEST_CAPABILITY_RSP:
4337 handle_request_cap_rsp(crq, adapter);
4340 netdev_dbg(netdev, "Got Login Response\n");
4341 handle_login_rsp(crq, adapter);
4343 case LOGICAL_LINK_STATE_RSP:
4345 "Got Logical Link State Response, state: %d rc: %d\n",
4346 crq->logical_link_state_rsp.link_state,
4347 crq->logical_link_state_rsp.rc.code);
4348 adapter->logical_link_state =
4349 crq->logical_link_state_rsp.link_state;
4350 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4351 complete(&adapter->init_done);
4353 case LINK_STATE_INDICATION:
4354 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4355 adapter->phys_link_state =
4356 crq->link_state_indication.phys_link_state;
4357 adapter->logical_link_state =
4358 crq->link_state_indication.logical_link_state;
4360 case CHANGE_MAC_ADDR_RSP:
4361 netdev_dbg(netdev, "Got MAC address change Response\n");
4362 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4364 case ERROR_INDICATION:
4365 netdev_dbg(netdev, "Got Error Indication\n");
4366 handle_error_indication(crq, adapter);
4368 case REQUEST_STATISTICS_RSP:
4369 netdev_dbg(netdev, "Got Statistics Response\n");
4370 complete(&adapter->stats_done);
4372 case QUERY_IP_OFFLOAD_RSP:
4373 netdev_dbg(netdev, "Got Query IP offload Response\n");
4374 handle_query_ip_offload_rsp(adapter);
4376 case MULTICAST_CTRL_RSP:
4377 netdev_dbg(netdev, "Got multicast control Response\n");
4379 case CONTROL_IP_OFFLOAD_RSP:
4380 netdev_dbg(netdev, "Got Control IP offload Response\n");
4381 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4382 sizeof(adapter->ip_offload_ctrl),
4384 complete(&adapter->init_done);
4386 case COLLECT_FW_TRACE_RSP:
4387 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4388 complete(&adapter->fw_done);
4390 case GET_VPD_SIZE_RSP:
4391 handle_vpd_size_rsp(crq, adapter);
4394 handle_vpd_rsp(crq, adapter);
4397 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
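/* CRQ interrupt handler: defer all message processing to the tasklet
 * so the hard IRQ context stays short.
 */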
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

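/* Drain the CRQ under the queue lock, handling each valid message in
 * arrival order. The loop keeps running while capability responses
 * are still outstanding (see wait_capability below).
 */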
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

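/* Ask the hypervisor to re-enable our CRQ, retrying while it reports
 * that the operation is busy or still in progress.
 */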
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

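/* Close and re-register the CRQ with the hypervisor, reusing the
 * existing message page; used to recover the queue, e.g. after a
 * partition migration or when initial registration finds the
 * resource busy.
 */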
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

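/* Tear down the CRQ: release the IRQ and tasklet, free the queue with
 * the hypervisor, and unmap and free the message page.
 */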
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

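/* Allocate and DMA-map a page of CRQ messages, register it with the
 * hypervisor (H_REG_CRQ), and set up the CRQ interrupt and tasklet.
 * Setup continues even if the partner side is not ready yet (H_CLOSED);
 * the partner will announce itself with an init message later.
 */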
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

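/* Re-drive the CRQ init handshake during a reset. If the requested
 * queue counts changed, the sub-CRQs are released and reallocated;
 * otherwise the existing sub-CRQ queues are simply reset.
 */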
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

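/* First-time initialization: run the CRQ init handshake with the
 * server, then allocate the sub-CRQs and their interrupts.
 */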
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

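/* Probe a "network"/"IBM,vnic" VIO device: allocate the net_device,
 * set up the CRQ and negotiate capabilities with the server, then
 * register the netdev and the failover sysfs attribute.
 */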
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}

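/* Undo ibmvnic_probe: unregister the netdev and release all queues,
 * buffers, and sysfs entries.
 */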
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

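/* sysfs "failover" attribute: writing "1" fetches the session token
 * via H_VIOCTL and signals a session error to initiate a
 * client-driven failover.
 */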
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "Client initiated failover failed, rc %ld\n", rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

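/* Estimate the IO entitlement (DMA space) this adapter wants: the CRQ
 * page, the statistics buffer, the sub-CRQ message queues, and the
 * long term mapped rx buffer pools.
 */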
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

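/* PM resume callback: if the adapter was open, kick the tasklet in
 * case CRQ messages arrived while we were suspended.
 */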
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);