/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify */
/*  it under the terms of the GNU General Public License as published by */
/*  the Free Software Foundation; either version 2 of the License, or    */
/*  (at your option) any later version.                                  */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,      */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/*  GNU General Public License for more details.                         */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License    */
/*  along with this program.                                             */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux.  It utilizes the logical LAN  */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs).  CRQs are used   */
/* to issue and receive commands that initiate communication with the    */
/* server on driver initialization.  Sub CRQs (sCRQs) are similar to     */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet.  Subsequently, sCRQs are used by the server to notify the   */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping".  This entails that large, contiguous DMA mapped  */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
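/* Illustrative message flow, sketched from the description above (this is
 * a reading aid, not an additional protocol definition):
 *
 *	driver						VNIC server
 *	------						-----------
 *	CRQ: version exchange / capabilities / login	-->
 *	CRQ: REQUEST_MAP (long term buffer)		-->
 *	sCRQ: rx buffer added				-->
 *	sCRQ: tx descriptor ready			-->
 *			<--	sCRQ: tx completion
 *			<--	sCRQ: rx frame placed in a posted buffer
 */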
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;

static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
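/* For example (illustrative only): IBMVNIC_STAT_OFF(rx_packets) resolves
 * to the byte offset of adapter->stats.rx_packets within struct
 * ibmvnic_adapter, and IBMVNIC_GET_STAT(adapter, off) reads the u64 at
 * that offset:
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * which is equivalent to adapter->stats.rx_packets, but lets the ethtool
 * code walk ibmvnic_stats[] generically.
 */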
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}

	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
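/* A sketch of the long term buffer lifecycle implied above (illustrative,
 * not additional driver API):
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, size);
 *		// dma_alloc_coherent() + REQUEST_MAP handshake with server
 *	...	// the buffer is then reused for many skbs without remapping
 *	free_long_term_buff(adapter, &ltb);
 *		// REQUEST_UNMAP, unless a failover/mobility reset means the
 *		// server side of the mapping is already gone
 */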
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
			       off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
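
		/* Worked example (illustrative): for a 2048-byte (0x800)
		 * buffer on a little-endian host, shift = 8 gives
		 * 0x800 << 8 = 0x80000, and cpu_to_be32(0x80000) lays the
		 * bytes out as 00 08 00 00, so the 24-bit length field
		 * carries 0x000800 and no low byte is truncated.
		 */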
		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}
static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}
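/* The renegotiation loop above, sketched (illustrative): a LOGIN response
 * may set adapter->renegotiate instead of completing the login, in which
 * case the driver tears down its sub-CRQs, re-queries capabilities and
 * logs in again; the loop exits once a login completes without a further
 * renegotiate request (or a timeout fails the operation).
 */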
static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}
static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				disable_irq(adapter->rx_scrq[i]->irq);
	}
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->state = VNIC_CLOSING;
	netif_tx_stop_all_queues(netdev);
	disable_sub_crqs(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);

	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to fill in
 * @hdr_data: buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total buffer length, both used to build
 * descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
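/* Worked example (illustrative): for a TCP/IPv4 skb with the L2, L3 and
 * L4 bits of hdr_field set (bits 6, 5 and 4), hdr_len becomes {14, 20, 20}
 * for standard 20-byte IP and TCP headers, and the returned length is
 * 54: ethhdr | iphdr | tcphdr copied back to back into hdr_data.
 */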
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer containing the socket buffer to describe
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
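/* Worked example (illustrative): a 54-byte header block fits 24 bytes in
 * the initial header descriptor, leaving 30 bytes for extension
 * descriptors of up to 29 bytes each, so *num_entries grows by
 * 30 % 29 ? 30 / 29 + 1 : 30 / 29 = 2 and create_hdr_descs() emits three
 * descriptors in total.
 */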
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;

	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED)
			netif_stop_subqueue(netdev, queue_num);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);

	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			return 0;
	}

	rc = __ibmvnic_close(netdev);
	if (rc)
		return rc;

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	rc = ibmvnic_init(adapter);
	if (rc)
		return 0;

	/* If the adapter was in PROBE state prior to the reset, exit here. */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	return 0;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (rc) {
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}
static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
			  enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
		return;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_err(netdev, "Matching reset found, skipping\n");
			mutex_unlock(&adapter->rwi_lock);
			return;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		return;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);
	schedule_work(&adapter->ibmvnic_reset);
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
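/* The re-poll dance above, sketched (illustrative): when the budget is not
 * exhausted, the irq is re-enabled and the NAPI context completed; if a
 * frame slipped in during that window, pending_scrq() sees it,
 * napi_reschedule() re-claims the context, the irq is disabled again and
 * polling restarts, so no completion is left stranded with interrupts off.
 */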
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
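/* A sketch of the request/response pattern above (illustrative): the
 * REQUEST_STATISTICS CRQ only names the DMA token; the server writes the
 * ibmvnic_statistics block into that buffer, and the CRQ response path
 * (outside this excerpt) is expected to call
 * complete(&adapter->stats_done), releasing the waiter before the stats
 * are copied out through IBMVNIC_GET_STAT().
 */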
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);

	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);

	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				if (atomic_sub_return(next->tx_comp.num_comps,
						      &scrq->used) <=
				    (adapter->req_tx_entries_per_subcrq / 2) &&
				    netif_subqueue_stopped(adapter->netdev,
							   txbuff->skb)) {
					netif_wake_subqueue(adapter->netdev,
							    scrq->pool_index);
					netdev_dbg(adapter->netdev,
						   "Started queue %d\n",
						   scrq->pool_index);
				}

				dev_kfree_skb_any(txbuff->skb);
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
				(adapter->tx_pool[pool].producer_index + 1) %
				adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}
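/* Worked example (illustrative): with req_tx_queues = req_rx_queues = 4
 * and only 6 of the 8 sub-CRQs registered, the loop above trims one rx
 * queue (i = 0) and one tx queue (i = 1), leaving a 3/3 split; when a
 * minimum prevents a decrement, "more" extends the loop so the shortfall
 * is taken from the other queue type instead.
 */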
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
			adapter->max_tx_entries_per_subcrq > entries_page ?
			entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
			adapter->max_rx_add_entries_per_subcrq > entries_page ?
			entries_page : adapter->max_rx_add_entries_per_subcrq;
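
		/* Worked example (illustrative): with 4 KB pages,
		 * entries_page = 4 * 4096 / 32 = 512, so a queried
		 * maximum of 1024 entries per sub-CRQ is capped at 512,
		 * while a maximum of 256 is used unchanged.
		 */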
		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
	    adapter->state == VNIC_CLOSING)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
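/* Illustrative reading of the traversal above: the sub-CRQ is a ring of
 * scrq->size 32-byte entries.  The IBMVNIC_CRQ_CMD_RSP bit in
 * generic.first marks entries the server has filled; ibmvnic_next_scrq()
 * consumes the current entry and wraps scrq->cur back to 0 at the end of
 * the ring, so with size = 512 the cur sequence is 0, 1, ..., 511, 0, ...
 */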
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
2212 static void send_login(struct ibmvnic_adapter *adapter)
2213 {
2214 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
2215 struct ibmvnic_login_buffer *login_buffer;
2216 struct device *dev = &adapter->vdev->dev;
2217 dma_addr_t rsp_buffer_token;
2218 dma_addr_t buffer_token;
2219 size_t rsp_buffer_size;
2220 union ibmvnic_crq crq;
2221 size_t buffer_size;
2222 __be64 *tx_list_p;
2223 __be64 *rx_list_p;
2224 int i;
2226 buffer_size =
2227 sizeof(struct ibmvnic_login_buffer) +
2228 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
2230 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
2231 if (!login_buffer)
2232 goto buf_alloc_failed;
2234 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
2235 DMA_TO_DEVICE);
2236 if (dma_mapping_error(dev, buffer_token)) {
2237 dev_err(dev, "Couldn't map login buffer\n");
2238 goto buf_map_failed;
2239 }
2241 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
2242 sizeof(u64) * adapter->req_tx_queues +
2243 sizeof(u64) * adapter->req_rx_queues +
2244 sizeof(u64) * adapter->req_rx_queues +
2245 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
2247 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
2248 if (!login_rsp_buffer)
2249 goto buf_rsp_alloc_failed;
2251 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
2252 rsp_buffer_size, DMA_FROM_DEVICE);
2253 if (dma_mapping_error(dev, rsp_buffer_token)) {
2254 dev_err(dev, "Couldn't map login rsp buffer\n");
2255 goto buf_rsp_map_failed;
2256 }
2258 adapter->login_buf = login_buffer;
2259 adapter->login_buf_token = buffer_token;
2260 adapter->login_buf_sz = buffer_size;
2261 adapter->login_rsp_buf = login_rsp_buffer;
2262 adapter->login_rsp_buf_token = rsp_buffer_token;
2263 adapter->login_rsp_buf_sz = rsp_buffer_size;
2265 login_buffer->len = cpu_to_be32(buffer_size);
2266 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
2267 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
2268 login_buffer->off_txcomp_subcrqs =
2269 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
2270 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
2271 login_buffer->off_rxcomp_subcrqs =
2272 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
2273 sizeof(u64) * adapter->req_tx_queues);
2274 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
2275 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
2277 tx_list_p = (__be64 *)((char *)login_buffer +
2278 sizeof(struct ibmvnic_login_buffer));
2279 rx_list_p = (__be64 *)((char *)login_buffer +
2280 sizeof(struct ibmvnic_login_buffer) +
2281 sizeof(u64) * adapter->req_tx_queues);
2283 for (i = 0; i < adapter->req_tx_queues; i++) {
2284 if (adapter->tx_scrq[i]) {
2285 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
2286 crq_num);
2287 }
2288 }
2290 for (i = 0; i < adapter->req_rx_queues; i++) {
2291 if (adapter->rx_scrq[i]) {
2292 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
2293 crq_num);
2294 }
2295 }
2297 netdev_dbg(adapter->netdev, "Login Buffer:\n");
2298 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
2299 netdev_dbg(adapter->netdev, "%016lx\n",
2300 ((unsigned long int *)(adapter->login_buf))[i]);
2301 }
2303 memset(&crq, 0, sizeof(crq));
2304 crq.login.first = IBMVNIC_CRQ_CMD;
2305 crq.login.cmd = LOGIN;
2306 crq.login.ioba = cpu_to_be32(buffer_token);
2307 crq.login.len = cpu_to_be32(buffer_size);
2308 ibmvnic_send_crq(adapter, &crq);
2310 return;
2312 buf_rsp_map_failed:
2313 kfree(login_rsp_buffer);
2314 buf_rsp_alloc_failed:
2315 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
2316 buf_map_failed:
2317 kfree(login_buffer);
2318 buf_alloc_failed:
2319 return;
2320 }
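/* REQUEST_MAP/REQUEST_UNMAP register and release long term buffers with
 * the server; map_id is a driver-chosen tag that the server echoes back
 * in REQUEST_MAP_RSP (handled below, completing adapter->fw_done).
 * Sketch of the expected calling pattern, with an illustrative ltb:
 *
 *	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 *	wait_for_completion(&adapter->fw_done);
 */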
2322 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2323 u32 len, u8 map_id)
2324 {
2325 union ibmvnic_crq crq;
2327 memset(&crq, 0, sizeof(crq));
2328 crq.request_map.first = IBMVNIC_CRQ_CMD;
2329 crq.request_map.cmd = REQUEST_MAP;
2330 crq.request_map.map_id = map_id;
2331 crq.request_map.ioba = cpu_to_be32(addr);
2332 crq.request_map.len = cpu_to_be32(len);
2333 ibmvnic_send_crq(adapter, &crq);
2334 }
2336 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2337 {
2338 union ibmvnic_crq crq;
2340 memset(&crq, 0, sizeof(crq));
2341 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2342 crq.request_unmap.cmd = REQUEST_UNMAP;
2343 crq.request_unmap.map_id = map_id;
2344 ibmvnic_send_crq(adapter, &crq);
2345 }
2347 static void send_map_query(struct ibmvnic_adapter *adapter)
2348 {
2349 union ibmvnic_crq crq;
2351 memset(&crq, 0, sizeof(crq));
2352 crq.query_map.first = IBMVNIC_CRQ_CMD;
2353 crq.query_map.cmd = QUERY_MAP;
2354 ibmvnic_send_crq(adapter, &crq);
2355 }
2357 /* Send a series of CRQs requesting various capabilities of the VNIC server */
2358 static void send_cap_queries(struct ibmvnic_adapter *adapter)
2359 {
2360 union ibmvnic_crq crq;
2362 atomic_set(&adapter->running_cap_crqs, 0);
2363 memset(&crq, 0, sizeof(crq));
2364 crq.query_capability.first = IBMVNIC_CRQ_CMD;
2365 crq.query_capability.cmd = QUERY_CAPABILITY;
2367 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
2368 atomic_inc(&adapter->running_cap_crqs);
2369 ibmvnic_send_crq(adapter, &crq);
2371 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2372 atomic_inc(&adapter->running_cap_crqs);
2373 ibmvnic_send_crq(adapter, &crq);
2375 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2376 atomic_inc(&adapter->running_cap_crqs);
2377 ibmvnic_send_crq(adapter, &crq);
2379 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2380 atomic_inc(&adapter->running_cap_crqs);
2381 ibmvnic_send_crq(adapter, &crq);
2383 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2384 atomic_inc(&adapter->running_cap_crqs);
2385 ibmvnic_send_crq(adapter, &crq);
2387 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2388 atomic_inc(&adapter->running_cap_crqs);
2389 ibmvnic_send_crq(adapter, &crq);
2391 crq.query_capability.capability =
2392 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2393 atomic_inc(&adapter->running_cap_crqs);
2394 ibmvnic_send_crq(adapter, &crq);
2396 crq.query_capability.capability =
2397 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2398 atomic_inc(&adapter->running_cap_crqs);
2399 ibmvnic_send_crq(adapter, &crq);
2401 crq.query_capability.capability =
2402 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2403 atomic_inc(&adapter->running_cap_crqs);
2404 ibmvnic_send_crq(adapter, &crq);
2406 crq.query_capability.capability =
2407 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2408 atomic_inc(&adapter->running_cap_crqs);
2409 ibmvnic_send_crq(adapter, &crq);
2411 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2412 atomic_inc(&adapter->running_cap_crqs);
2413 ibmvnic_send_crq(adapter, &crq);
2415 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2416 atomic_inc(&adapter->running_cap_crqs);
2417 ibmvnic_send_crq(adapter, &crq);
2419 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2420 atomic_inc(&adapter->running_cap_crqs);
2421 ibmvnic_send_crq(adapter, &crq);
2423 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2424 atomic_inc(&adapter->running_cap_crqs);
2425 ibmvnic_send_crq(adapter, &crq);
2427 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2428 atomic_inc(&adapter->running_cap_crqs);
2429 ibmvnic_send_crq(adapter, &crq);
2431 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2432 atomic_inc(&adapter->running_cap_crqs);
2433 ibmvnic_send_crq(adapter, &crq);
2435 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
2436 atomic_inc(&adapter->running_cap_crqs);
2437 ibmvnic_send_crq(adapter, &crq);
2439 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2440 atomic_inc(&adapter->running_cap_crqs);
2441 ibmvnic_send_crq(adapter, &crq);
2443 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2444 atomic_inc(&adapter->running_cap_crqs);
2445 ibmvnic_send_crq(adapter, &crq);
2447 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2448 atomic_inc(&adapter->running_cap_crqs);
2449 ibmvnic_send_crq(adapter, &crq);
2451 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2452 atomic_inc(&adapter->running_cap_crqs);
2453 ibmvnic_send_crq(adapter, &crq);
2455 crq.query_capability.capability =
2456 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2457 atomic_inc(&adapter->running_cap_crqs);
2458 ibmvnic_send_crq(adapter, &crq);
2460 crq.query_capability.capability =
2461 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2462 atomic_inc(&adapter->running_cap_crqs);
2463 ibmvnic_send_crq(adapter, &crq);
2465 crq.query_capability.capability =
2466 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2467 atomic_inc(&adapter->running_cap_crqs);
2468 ibmvnic_send_crq(adapter, &crq);
2470 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2471 atomic_inc(&adapter->running_cap_crqs);
2472 ibmvnic_send_crq(adapter, &crq);
2473 }
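/* Each query above increments running_cap_crqs before the CRQ is sent;
 * handle_query_cap_rsp() decrements it, and the response that brings the
 * count back to zero triggers ibmvnic_send_req_caps().
 */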
2475 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2476 {
2477 struct device *dev = &adapter->vdev->dev;
2478 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2479 union ibmvnic_crq crq;
2480 int i;
2482 dma_unmap_single(dev, adapter->ip_offload_tok,
2483 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2485 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2486 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2487 netdev_dbg(adapter->netdev, "%016lx\n",
2488 ((unsigned long int *)(buf))[i]);
2490 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2491 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2492 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2493 buf->tcp_ipv4_chksum);
2494 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2495 buf->tcp_ipv6_chksum);
2496 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2497 buf->udp_ipv4_chksum);
2498 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2499 buf->udp_ipv6_chksum);
2500 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2501 buf->large_tx_ipv4);
2502 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2503 buf->large_tx_ipv6);
2504 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2505 buf->large_rx_ipv4);
2506 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2507 buf->large_rx_ipv6);
2508 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2509 buf->max_ipv4_header_size);
2510 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2511 buf->max_ipv6_header_size);
2512 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2513 buf->max_tcp_header_size);
2514 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2515 buf->max_udp_header_size);
2516 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2517 buf->max_large_tx_size);
2518 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2519 buf->max_large_rx_size);
2520 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2521 buf->ipv6_extension_header);
2522 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2523 buf->tcp_pseudosum_req);
2524 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2525 buf->num_ipv6_ext_headers);
2526 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2527 buf->off_ipv6_ext_headers);
2529 adapter->ip_offload_ctrl_tok =
2530 dma_map_single(dev, &adapter->ip_offload_ctrl,
2531 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2533 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2534 dev_err(dev, "Couldn't map ip offload control buffer\n");
2535 return;
2536 }
2538 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2539 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2540 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2541 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2542 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2544 /* large_tx/rx disabled for now, additional features needed */
2545 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2546 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2547 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2548 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2550 adapter->netdev->features = NETIF_F_GSO;
2552 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2553 adapter->netdev->features |= NETIF_F_IP_CSUM;
2555 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2556 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2558 if ((adapter->netdev->features &
2559 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2560 adapter->netdev->features |= NETIF_F_RXCSUM;
2562 memset(&crq, 0, sizeof(crq));
2563 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2564 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2565 crq.control_ip_offload.len =
2566 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2567 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2568 ibmvnic_send_crq(adapter, &crq);
2569 }
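/* Error reporting path: a firmware ERROR_INDICATION carries an error id
 * and the size of the available detail data.  request_error_information()
 * below maps a buffer of that size and asks the server to fill it; the
 * REQUEST_ERROR_RSP handler then finds the buffer by id, dumps it, and
 * frees it.
 */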
2571 static void handle_error_info_rsp(union ibmvnic_crq *crq,
2572 struct ibmvnic_adapter *adapter)
2573 {
2574 struct device *dev = &adapter->vdev->dev;
2575 struct ibmvnic_error_buff *error_buff, *tmp;
2576 unsigned long flags;
2577 bool found = false;
2578 int i;
2580 if (crq->request_error_rsp.rc.code) {
2581 dev_err(dev, "Request Error Rsp returned with rc=%x\n",
2582 crq->request_error_rsp.rc.code);
2583 return;
2584 }
2586 spin_lock_irqsave(&adapter->error_list_lock, flags);
2587 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2588 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2589 found = true;
2590 list_del(&error_buff->list);
2591 break;
2592 }
2593 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2595 if (!found) {
2596 dev_err(dev, "Couldn't find error id %x\n",
2597 be32_to_cpu(crq->request_error_rsp.error_id));
2598 return;
2599 }
2601 dev_err(dev, "Detailed info for error id %x:",
2602 be32_to_cpu(crq->request_error_rsp.error_id));
2604 for (i = 0; i < error_buff->len; i++) {
2605 pr_cont("%02x", (int)error_buff->buff[i]);
2606 if (!(i % 8))
2607 pr_cont(" ");
2608 }
2609 pr_cont("\n");
2611 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2612 DMA_FROM_DEVICE);
2613 kfree(error_buff->buff);
2614 kfree(error_buff);
2615 }
2617 static void request_error_information(struct ibmvnic_adapter *adapter,
2618 union ibmvnic_crq *err_crq)
2619 {
2620 struct device *dev = &adapter->vdev->dev;
2621 struct net_device *netdev = adapter->netdev;
2622 struct ibmvnic_error_buff *error_buff;
2623 unsigned long timeout = msecs_to_jiffies(30000);
2624 union ibmvnic_crq crq;
2625 unsigned long flags;
2626 int rc, detail_len;
2628 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2629 if (!error_buff)
2630 return;
2632 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
2633 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2634 if (!error_buff->buff) {
2635 kfree(error_buff);
2636 return;
2637 }
2639 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2640 DMA_FROM_DEVICE);
2641 if (dma_mapping_error(dev, error_buff->dma)) {
2642 netdev_err(netdev, "Couldn't map error buffer\n");
2643 kfree(error_buff->buff);
2644 kfree(error_buff);
2645 return;
2646 }
2648 error_buff->len = detail_len;
2649 error_buff->error_id = err_crq->error_indication.error_id;
2651 spin_lock_irqsave(&adapter->error_list_lock, flags);
2652 list_add_tail(&error_buff->list, &adapter->errors);
2653 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2655 memset(&crq, 0, sizeof(crq));
2656 crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2657 crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2658 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2659 crq.request_error_info.len = cpu_to_be32(detail_len);
2660 crq.request_error_info.error_id = err_crq->error_indication.error_id;
2662 rc = ibmvnic_send_crq(adapter, &crq);
2663 if (rc) {
2664 netdev_err(netdev, "failed to request error information\n");
2665 goto err_info_fail;
2666 }
2668 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2669 netdev_err(netdev, "timeout waiting for error information\n");
2670 goto err_info_fail;
2671 }
2673 return;
2675 err_info_fail:
2676 spin_lock_irqsave(&adapter->error_list_lock, flags);
2677 list_del(&error_buff->list);
2678 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2680 kfree(error_buff->buff);
2681 kfree(error_buff);
2682 }
2684 static void handle_error_indication(union ibmvnic_crq *crq,
2685 struct ibmvnic_adapter *adapter)
2686 {
2687 struct device *dev = &adapter->vdev->dev;
2689 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2690 crq->error_indication.flags
2691 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2692 be32_to_cpu(crq->error_indication.error_id),
2693 be16_to_cpu(crq->error_indication.error_cause));
2695 if (be32_to_cpu(crq->error_indication.error_id))
2696 request_error_information(adapter, crq);
2698 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
2699 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
2700 }
2702 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2703 struct ibmvnic_adapter *adapter)
2704 {
2705 struct net_device *netdev = adapter->netdev;
2706 struct device *dev = &adapter->vdev->dev;
2707 long rc;
2709 rc = crq->change_mac_addr_rsp.rc.code;
2710 if (rc) {
2711 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2712 return;
2713 }
2714 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2715 ETH_ALEN);
2716 }
2718 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2719 struct ibmvnic_adapter *adapter)
2720 {
2721 struct device *dev = &adapter->vdev->dev;
2722 u64 *req_value;
2723 char *name;
2725 atomic_dec(&adapter->running_cap_crqs);
2726 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2727 case REQ_TX_QUEUES:
2728 req_value = &adapter->req_tx_queues;
2729 name = "tx";
2730 break;
2731 case REQ_RX_QUEUES:
2732 req_value = &adapter->req_rx_queues;
2733 name = "rx";
2734 break;
2735 case REQ_RX_ADD_QUEUES:
2736 req_value = &adapter->req_rx_add_queues;
2737 name = "rx_add";
2738 break;
2739 case REQ_TX_ENTRIES_PER_SUBCRQ:
2740 req_value = &adapter->req_tx_entries_per_subcrq;
2741 name = "tx_entries_per_subcrq";
2742 break;
2743 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2744 req_value = &adapter->req_rx_add_entries_per_subcrq;
2745 name = "rx_add_entries_per_subcrq";
2746 break;
2747 case REQ_MTU:
2748 req_value = &adapter->req_mtu;
2749 name = "mtu";
2750 break;
2751 case PROMISC_REQUESTED:
2752 req_value = &adapter->promisc;
2753 name = "promisc";
2754 break;
2755 default:
2756 dev_err(dev, "Got invalid cap request rsp %d\n",
2757 crq->request_capability.capability);
2758 return;
2759 }
2761 switch (crq->request_capability_rsp.rc.code) {
2762 case SUCCESS:
2763 break;
2764 case PARTIALSUCCESS:
2765 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2766 *req_value,
2767 (long int)be64_to_cpu(crq->request_capability_rsp.
2768 number), name);
2769 release_sub_crqs(adapter);
2770 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
2771 ibmvnic_send_req_caps(adapter, 1);
2772 return;
2773 default:
2774 dev_err(dev, "Error %d in request cap rsp\n",
2775 crq->request_capability_rsp.rc.code);
2776 return;
2777 }
2779 /* Done receiving requested capabilities, query IP offload support */
2780 if (atomic_read(&adapter->running_cap_crqs) == 0) {
2781 union ibmvnic_crq newcrq;
2782 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2783 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2784 &adapter->ip_offload_buf;
2786 adapter->wait_capability = false;
2787 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2788 buf_sz,
2789 DMA_FROM_DEVICE);
2791 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2792 if (!firmware_has_feature(FW_FEATURE_CMO))
2793 dev_err(dev, "Couldn't map offload buffer\n");
2794 return;
2795 }
2797 memset(&newcrq, 0, sizeof(newcrq));
2798 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2799 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2800 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2801 newcrq.query_ip_offload.ioba =
2802 cpu_to_be32(adapter->ip_offload_tok);
2804 ibmvnic_send_crq(adapter, &newcrq);
2805 }
2806 }
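/* A login response with a non-zero return code means the server could
 * not satisfy the requested queue counts; setting adapter->renegotiate
 * lets the initialization path resend the login with smaller requests.
 */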
2808 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2809 struct ibmvnic_adapter *adapter)
2810 {
2811 struct device *dev = &adapter->vdev->dev;
2812 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2813 struct ibmvnic_login_buffer *login = adapter->login_buf;
2814 int i;
2816 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2817 DMA_BIDIRECTIONAL);
2818 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2819 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2821 /* If the number of queues requested can't be allocated by the
2822 * server, the login response will return with code 1. We will need
2823 * to resend the login buffer with fewer queues requested.
2824 */
2825 if (login_rsp_crq->generic.rc.code) {
2826 adapter->renegotiate = true;
2827 complete(&adapter->init_done);
2828 return 0;
2829 }
2831 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2832 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2833 netdev_dbg(adapter->netdev, "%016lx\n",
2834 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2835 }
2837 /* Sanity checks */
2838 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2839 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2840 adapter->req_rx_add_queues !=
2841 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2842 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2843 ibmvnic_remove(adapter->vdev);
2844 return -EIO;
2845 }
2846 complete(&adapter->init_done);
2848 return 0;
2849 }
2851 static void handle_request_map_rsp(union ibmvnic_crq *crq,
2852 struct ibmvnic_adapter *adapter)
2853 {
2854 struct device *dev = &adapter->vdev->dev;
2855 u8 map_id = crq->request_map_rsp.map_id;
2856 int tx_subcrqs;
2857 int rx_subcrqs;
2858 long rc;
2859 int i;
2861 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2862 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2864 rc = crq->request_map_rsp.rc.code;
2865 if (rc) {
2866 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2867 adapter->map_id--;
2868 /* need to find and zero tx/rx_pool map_id */
2869 for (i = 0; i < tx_subcrqs; i++) {
2870 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2871 adapter->tx_pool[i].long_term_buff.map_id = 0;
2872 }
2873 for (i = 0; i < rx_subcrqs; i++) {
2874 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2875 adapter->rx_pool[i].long_term_buff.map_id = 0;
2876 }
2877 }
2878 complete(&adapter->fw_done);
2879 }
2881 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2882 struct ibmvnic_adapter *adapter)
2883 {
2884 struct device *dev = &adapter->vdev->dev;
2885 long rc;
2887 rc = crq->request_unmap_rsp.rc.code;
2888 if (rc)
2889 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2890 }
2892 static void handle_query_map_rsp(union ibmvnic_crq *crq,
2893 struct ibmvnic_adapter *adapter)
2894 {
2895 struct net_device *netdev = adapter->netdev;
2896 struct device *dev = &adapter->vdev->dev;
2897 long rc;
2899 rc = crq->query_map_rsp.rc.code;
2900 if (rc) {
2901 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2902 return;
2903 }
2904 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2905 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2906 crq->query_map_rsp.free_pages);
2907 }
2909 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2910 struct ibmvnic_adapter *adapter)
2911 {
2912 struct net_device *netdev = adapter->netdev;
2913 struct device *dev = &adapter->vdev->dev;
2914 long rc;
2916 atomic_dec(&adapter->running_cap_crqs);
2917 netdev_dbg(netdev, "Outstanding queries: %d\n",
2918 atomic_read(&adapter->running_cap_crqs));
2919 rc = crq->query_capability.rc.code;
2920 if (rc) {
2921 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2922 goto out;
2923 }
2925 switch (be16_to_cpu(crq->query_capability.capability)) {
2926 case MIN_TX_QUEUES:
2927 adapter->min_tx_queues =
2928 be64_to_cpu(crq->query_capability.number);
2929 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2930 adapter->min_tx_queues);
2931 break;
2932 case MIN_RX_QUEUES:
2933 adapter->min_rx_queues =
2934 be64_to_cpu(crq->query_capability.number);
2935 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2936 adapter->min_rx_queues);
2937 break;
2938 case MIN_RX_ADD_QUEUES:
2939 adapter->min_rx_add_queues =
2940 be64_to_cpu(crq->query_capability.number);
2941 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2942 adapter->min_rx_add_queues);
2943 break;
2944 case MAX_TX_QUEUES:
2945 adapter->max_tx_queues =
2946 be64_to_cpu(crq->query_capability.number);
2947 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2948 adapter->max_tx_queues);
2949 break;
2950 case MAX_RX_QUEUES:
2951 adapter->max_rx_queues =
2952 be64_to_cpu(crq->query_capability.number);
2953 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2954 adapter->max_rx_queues);
2955 break;
2956 case MAX_RX_ADD_QUEUES:
2957 adapter->max_rx_add_queues =
2958 be64_to_cpu(crq->query_capability.number);
2959 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2960 adapter->max_rx_add_queues);
2961 break;
2962 case MIN_TX_ENTRIES_PER_SUBCRQ:
2963 adapter->min_tx_entries_per_subcrq =
2964 be64_to_cpu(crq->query_capability.number);
2965 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2966 adapter->min_tx_entries_per_subcrq);
2967 break;
2968 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2969 adapter->min_rx_add_entries_per_subcrq =
2970 be64_to_cpu(crq->query_capability.number);
2971 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2972 adapter->min_rx_add_entries_per_subcrq);
2973 break;
2974 case MAX_TX_ENTRIES_PER_SUBCRQ:
2975 adapter->max_tx_entries_per_subcrq =
2976 be64_to_cpu(crq->query_capability.number);
2977 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2978 adapter->max_tx_entries_per_subcrq);
2979 break;
2980 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2981 adapter->max_rx_add_entries_per_subcrq =
2982 be64_to_cpu(crq->query_capability.number);
2983 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2984 adapter->max_rx_add_entries_per_subcrq);
2985 break;
2986 case TCP_IP_OFFLOAD:
2987 adapter->tcp_ip_offload =
2988 be64_to_cpu(crq->query_capability.number);
2989 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2990 adapter->tcp_ip_offload);
2991 break;
2992 case PROMISC_SUPPORTED:
2993 adapter->promisc_supported =
2994 be64_to_cpu(crq->query_capability.number);
2995 netdev_dbg(netdev, "promisc_supported = %lld\n",
2996 adapter->promisc_supported);
2997 break;
2998 case MIN_MTU:
2999 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
3000 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3001 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
3002 break;
3003 case MAX_MTU:
3004 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
3005 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3006 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
3007 break;
3008 case MAX_MULTICAST_FILTERS:
3009 adapter->max_multicast_filters =
3010 be64_to_cpu(crq->query_capability.number);
3011 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
3012 adapter->max_multicast_filters);
3013 break;
3014 case VLAN_HEADER_INSERTION:
3015 adapter->vlan_header_insertion =
3016 be64_to_cpu(crq->query_capability.number);
3017 if (adapter->vlan_header_insertion)
3018 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
3019 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
3020 adapter->vlan_header_insertion);
3021 break;
3022 case RX_VLAN_HEADER_INSERTION:
3023 adapter->rx_vlan_header_insertion =
3024 be64_to_cpu(crq->query_capability.number);
3025 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
3026 adapter->rx_vlan_header_insertion);
3027 break;
3028 case MAX_TX_SG_ENTRIES:
3029 adapter->max_tx_sg_entries =
3030 be64_to_cpu(crq->query_capability.number);
3031 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
3032 adapter->max_tx_sg_entries);
3033 break;
3034 case RX_SG_SUPPORTED:
3035 adapter->rx_sg_supported =
3036 be64_to_cpu(crq->query_capability.number);
3037 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
3038 adapter->rx_sg_supported);
3039 break;
3040 case OPT_TX_COMP_SUB_QUEUES:
3041 adapter->opt_tx_comp_sub_queues =
3042 be64_to_cpu(crq->query_capability.number);
3043 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
3044 adapter->opt_tx_comp_sub_queues);
3045 break;
3046 case OPT_RX_COMP_QUEUES:
3047 adapter->opt_rx_comp_queues =
3048 be64_to_cpu(crq->query_capability.number);
3049 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
3050 adapter->opt_rx_comp_queues);
3051 break;
3052 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
3053 adapter->opt_rx_bufadd_q_per_rx_comp_q =
3054 be64_to_cpu(crq->query_capability.number);
3055 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
3056 adapter->opt_rx_bufadd_q_per_rx_comp_q);
3057 break;
3058 case OPT_TX_ENTRIES_PER_SUBCRQ:
3059 adapter->opt_tx_entries_per_subcrq =
3060 be64_to_cpu(crq->query_capability.number);
3061 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
3062 adapter->opt_tx_entries_per_subcrq);
3063 break;
3064 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
3065 adapter->opt_rxba_entries_per_subcrq =
3066 be64_to_cpu(crq->query_capability.number);
3067 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
3068 adapter->opt_rxba_entries_per_subcrq);
3069 break;
3070 case TX_RX_DESC_REQ:
3071 adapter->tx_rx_desc_req = crq->query_capability.number;
3072 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
3073 adapter->tx_rx_desc_req);
3074 break;
3076 default:
3077 netdev_err(netdev, "Got invalid cap rsp %d\n",
3078 crq->query_capability.capability);
3079 }
3081 out:
3082 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3083 adapter->wait_capability = false;
3084 ibmvnic_send_req_caps(adapter, 0);
3085 }
3086 }
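/* Top-level CRQ dispatcher, invoked from the tasklet for every valid
 * entry pulled off the queue: initialization and transport events are
 * handled inline, and command responses fan out to the handle_*_rsp
 * helpers above.
 */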
3088 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3089 struct ibmvnic_adapter *adapter)
3090 {
3091 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3092 struct net_device *netdev = adapter->netdev;
3093 struct device *dev = &adapter->vdev->dev;
3094 u64 *u64_crq = (u64 *)crq;
3095 long rc;
3097 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3098 (unsigned long int)cpu_to_be64(u64_crq[0]),
3099 (unsigned long int)cpu_to_be64(u64_crq[1]));
3100 switch (gen_crq->first) {
3101 case IBMVNIC_CRQ_INIT_RSP:
3102 switch (gen_crq->cmd) {
3103 case IBMVNIC_CRQ_INIT:
3104 dev_info(dev, "Partner initialized\n");
3105 break;
3106 case IBMVNIC_CRQ_INIT_COMPLETE:
3107 dev_info(dev, "Partner initialization complete\n");
3108 send_version_xchg(adapter);
3109 break;
3110 default:
3111 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3112 }
3113 return;
3114 case IBMVNIC_CRQ_XPORT_EVENT:
3115 netif_carrier_off(netdev);
3116 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3117 dev_info(dev, "Migrated, re-enabling adapter\n");
3118 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
3119 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3120 dev_info(dev, "Backing device failover detected\n");
3121 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
3122 } else {
3123 /* The adapter lost the connection */
3124 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3125 gen_crq->cmd);
3126 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3127 }
3128 return;
3129 case IBMVNIC_CRQ_CMD_RSP:
3130 break;
3131 default:
3132 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3133 gen_crq->first);
3134 return;
3135 }
3137 switch (gen_crq->cmd) {
3138 case VERSION_EXCHANGE_RSP:
3139 rc = crq->version_exchange_rsp.rc.code;
3140 if (rc) {
3141 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3142 break;
3143 }
3144 dev_info(dev, "Partner protocol version is %d\n",
3145 crq->version_exchange_rsp.version);
3146 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3147 ibmvnic_version)
3148 ibmvnic_version =
3149 be16_to_cpu(crq->version_exchange_rsp.version);
3150 send_cap_queries(adapter);
3151 break;
3152 case QUERY_CAPABILITY_RSP:
3153 handle_query_cap_rsp(crq, adapter);
3154 break;
3155 case QUERY_MAP_RSP:
3156 handle_query_map_rsp(crq, adapter);
3157 break;
3158 case REQUEST_MAP_RSP:
3159 handle_request_map_rsp(crq, adapter);
3160 break;
3161 case REQUEST_UNMAP_RSP:
3162 handle_request_unmap_rsp(crq, adapter);
3163 break;
3164 case REQUEST_CAPABILITY_RSP:
3165 handle_request_cap_rsp(crq, adapter);
3166 break;
3167 case LOGIN_RSP:
3168 netdev_dbg(netdev, "Got Login Response\n");
3169 handle_login_rsp(crq, adapter);
3170 break;
3171 case LOGICAL_LINK_STATE_RSP:
3172 netdev_dbg(netdev,
3173 "Got Logical Link State Response, state: %d rc: %d\n",
3174 crq->logical_link_state_rsp.link_state,
3175 crq->logical_link_state_rsp.rc.code);
3176 adapter->logical_link_state =
3177 crq->logical_link_state_rsp.link_state;
3178 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
3179 complete(&adapter->init_done);
3180 break;
3181 case LINK_STATE_INDICATION:
3182 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3183 adapter->phys_link_state =
3184 crq->link_state_indication.phys_link_state;
3185 adapter->logical_link_state =
3186 crq->link_state_indication.logical_link_state;
3187 break;
3188 case CHANGE_MAC_ADDR_RSP:
3189 netdev_dbg(netdev, "Got MAC address change Response\n");
3190 handle_change_mac_rsp(crq, adapter);
3191 break;
3192 case ERROR_INDICATION:
3193 netdev_dbg(netdev, "Got Error Indication\n");
3194 handle_error_indication(crq, adapter);
3195 break;
3196 case REQUEST_ERROR_RSP:
3197 netdev_dbg(netdev, "Got Error Detail Response\n");
3198 handle_error_info_rsp(crq, adapter);
3199 break;
3200 case REQUEST_STATISTICS_RSP:
3201 netdev_dbg(netdev, "Got Statistics Response\n");
3202 complete(&adapter->stats_done);
3203 break;
3204 case QUERY_IP_OFFLOAD_RSP:
3205 netdev_dbg(netdev, "Got Query IP offload Response\n");
3206 handle_query_ip_offload_rsp(adapter);
3207 break;
3208 case MULTICAST_CTRL_RSP:
3209 netdev_dbg(netdev, "Got multicast control Response\n");
3210 break;
3211 case CONTROL_IP_OFFLOAD_RSP:
3212 netdev_dbg(netdev, "Got Control IP offload Response\n");
3213 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3214 sizeof(adapter->ip_offload_ctrl),
3215 DMA_TO_DEVICE);
3216 complete(&adapter->init_done);
3217 break;
3218 case COLLECT_FW_TRACE_RSP:
3219 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3220 complete(&adapter->fw_done);
3221 break;
3222 default:
3223 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3224 gen_crq->cmd);
3225 }
3226 }
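/* The hard interrupt handler only schedules the tasklet; all CRQ
 * processing happens in ibmvnic_tasklet() below, under queue->lock.
 */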
3228 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3229 {
3230 struct ibmvnic_adapter *adapter = instance;
3232 tasklet_schedule(&adapter->tasklet);
3234 return IRQ_HANDLED;
3235 }
3236 static void ibmvnic_tasklet(void *data)
3237 {
3238 struct ibmvnic_adapter *adapter = data;
3239 struct ibmvnic_crq_queue *queue = &adapter->crq;
3240 union ibmvnic_crq *crq;
3241 unsigned long flags;
3242 bool done = false;
3244 spin_lock_irqsave(&queue->lock, flags);
3245 while (!done) {
3246 /* Pull all the valid messages off the CRQ */
3247 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3248 ibmvnic_handle_crq(crq, adapter);
3249 crq->generic.first = 0;
3250 }
3252 /* remain in tasklet until all
3253 * capabilities responses are received
3254 */
3255 if (!adapter->wait_capability)
3256 done = true;
3257 }
3258 /* if capabilities CRQ's were sent in this tasklet, the following
3259 * tasklet must wait until all responses are received
3260 */
3261 if (atomic_read(&adapter->running_cap_crqs) != 0)
3262 adapter->wait_capability = true;
3263 spin_unlock_irqrestore(&queue->lock, flags);
3264 }
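/* H_ENABLE_CRQ re-arms the CRQ after a partner reset; busy returns from
 * the hypervisor are simply retried until the queue is enabled.
 */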
3266 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3267 {
3268 struct vio_dev *vdev = adapter->vdev;
3269 int rc;
3271 do {
3272 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3273 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3275 if (rc)
3276 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3278 return rc;
3279 }
3281 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3282 {
3283 struct ibmvnic_crq_queue *crq = &adapter->crq;
3284 struct device *dev = &adapter->vdev->dev;
3285 struct vio_dev *vdev = adapter->vdev;
3286 int rc;
3288 /* Close the CRQ */
3289 do {
3290 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3291 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3293 /* Clean out the queue */
3294 memset(crq->msgs, 0, PAGE_SIZE);
3295 crq->cur = 0;
3297 /* And re-open it again */
3298 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3299 crq->msg_token, PAGE_SIZE);
3301 if (rc == H_CLOSED)
3302 /* Adapter is good, but other end is not ready */
3303 dev_warn(dev, "Partner adapter not ready\n");
3304 else if (rc != 0)
3305 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3307 return rc;
3308 }
3310 static void release_crq_queue(struct ibmvnic_adapter *adapter)
3311 {
3312 struct ibmvnic_crq_queue *crq = &adapter->crq;
3313 struct vio_dev *vdev = adapter->vdev;
3314 long rc;
3316 if (!crq->msgs)
3317 return;
3319 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3320 free_irq(vdev->irq, adapter);
3321 tasklet_kill(&adapter->tasklet);
3322 do {
3323 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3324 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3326 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3327 DMA_BIDIRECTIONAL);
3328 free_page((unsigned long)crq->msgs);
3329 crq->msgs = NULL;
3330 }
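/* CRQ setup: allocate one zeroed page for the message ring, DMA-map it,
 * and register it with H_REG_CRQ, falling back to a full CRQ reset if
 * firmware still holds the resource (e.g. across kexec), before wiring
 * up the tasklet and interrupt.
 */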
3332 static int init_crq_queue(struct ibmvnic_adapter *adapter)
3333 {
3334 struct ibmvnic_crq_queue *crq = &adapter->crq;
3335 struct device *dev = &adapter->vdev->dev;
3336 struct vio_dev *vdev = adapter->vdev;
3337 int rc, retrc = -ENOMEM;
3339 if (crq->msgs)
3340 return 0;
3342 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3343 /* Should we allocate more than one page? */
3345 if (!crq->msgs)
3346 return -ENOMEM;
3348 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3349 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3350 DMA_BIDIRECTIONAL);
3351 if (dma_mapping_error(dev, crq->msg_token))
3352 goto map_failed;
3354 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3355 crq->msg_token, PAGE_SIZE);
3357 if (rc == H_RESOURCE)
3358 /* maybe kexecing and resource is busy. try a reset */
3359 rc = ibmvnic_reset_crq(adapter);
3360 retrc = rc;
3362 if (rc == H_CLOSED) {
3363 dev_warn(dev, "Partner adapter not ready\n");
3364 } else if (rc) {
3365 dev_warn(dev, "Error %d opening adapter\n", rc);
3366 goto reg_crq_failed;
3367 }
3369 retrc = 0;
3371 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3372 (unsigned long)adapter);
3374 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3375 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3376 adapter);
3377 if (rc) {
3378 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3379 vdev->irq, rc);
3380 goto req_irq_failed;
3381 }
3383 rc = vio_enable_interrupts(vdev);
3384 if (rc) {
3385 dev_err(dev, "Error %d enabling interrupts\n", rc);
3386 goto req_irq_failed;
3387 }
3389 crq->cur = 0;
3390 spin_lock_init(&crq->lock);
3392 return retrc;
3394 req_irq_failed:
3395 tasklet_kill(&adapter->tasklet);
3396 do {
3397 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3398 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3399 reg_crq_failed:
3400 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3401 map_failed:
3402 free_page((unsigned long)crq->msgs);
3403 crq->msgs = NULL;
3404 return retrc;
3405 }
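/* Driver initialization handshake, in order (sketch):
 *
 *	init_crq_queue(adapter);		// register the CRQ
 *	ibmvnic_send_crq_init(adapter);		// IBMVNIC_CRQ_INIT handshake
 *	wait_for_completion_timeout(&adapter->init_done, timeout);
 *	init_sub_crqs(adapter);			// capability/login exchange
 *
 * init_done is completed from the CRQ handlers once version exchange,
 * capability negotiation and login have finished.
 */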
3407 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
3408 {
3409 struct device *dev = &adapter->vdev->dev;
3410 unsigned long timeout = msecs_to_jiffies(30000);
3411 int rc;
3413 rc = init_crq_queue(adapter);
3414 if (rc) {
3415 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3416 return rc;
3417 }
3419 init_completion(&adapter->init_done);
3420 ibmvnic_send_crq_init(adapter);
3421 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3422 dev_err(dev, "Initialization sequence timed out\n");
3423 release_crq_queue(adapter);
3424 return -1;
3425 }
3427 rc = init_sub_crqs(adapter);
3428 if (rc) {
3429 dev_err(dev, "Initialization of sub crqs failed\n");
3430 release_crq_queue(adapter);
3431 }
3433 return rc;
3434 }
3436 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3437 {
3438 struct ibmvnic_adapter *adapter;
3439 struct net_device *netdev;
3440 unsigned char *mac_addr_p;
3441 int rc;
3443 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3444 dev->unit_address);
3446 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3447 VETH_MAC_ADDR, NULL);
3448 if (!mac_addr_p) {
3449 dev_err(&dev->dev,
3450 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3451 __FILE__, __LINE__);
3452 return 0;
3453 }
3455 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3456 IBMVNIC_MAX_TX_QUEUES);
3457 if (!netdev)
3458 return -ENOMEM;
3460 adapter = netdev_priv(netdev);
3461 adapter->state = VNIC_PROBING;
3462 dev_set_drvdata(&dev->dev, netdev);
3463 adapter->vdev = dev;
3464 adapter->netdev = netdev;
3466 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3467 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3468 netdev->irq = dev->irq;
3469 netdev->netdev_ops = &ibmvnic_netdev_ops;
3470 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3471 SET_NETDEV_DEV(netdev, &dev->dev);
3473 spin_lock_init(&adapter->stats_lock);
3475 INIT_LIST_HEAD(&adapter->errors);
3476 spin_lock_init(&adapter->error_list_lock);
3478 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
3479 INIT_LIST_HEAD(&adapter->rwi_list);
3480 mutex_init(&adapter->reset_lock);
3481 mutex_init(&adapter->rwi_lock);
3482 adapter->resetting = false;
3484 rc = ibmvnic_init(adapter);
3485 if (rc) {
3486 free_netdev(netdev);
3487 return rc;
3488 }
3490 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3492 rc = register_netdev(netdev);
3493 if (rc) {
3494 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3495 free_netdev(netdev);
3496 return rc;
3497 }
3498 dev_info(&dev->dev, "ibmvnic registered\n");
3500 adapter->state = VNIC_PROBED;
3502 return 0;
3503 }
3504 static int ibmvnic_remove(struct vio_dev *dev)
3505 {
3506 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3507 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3509 adapter->state = VNIC_REMOVING;
3510 unregister_netdev(netdev);
3511 mutex_lock(&adapter->reset_lock);
3513 release_resources(adapter);
3514 release_sub_crqs(adapter);
3515 release_crq_queue(adapter);
3517 adapter->state = VNIC_REMOVED;
3519 mutex_unlock(&adapter->reset_lock);
3520 free_netdev(netdev);
3521 dev_set_drvdata(&dev->dev, NULL);
3523 return 0;
3524 }
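/* Called by the VIO bus code to size this device's IO entitlement: one
 * page for the CRQ, the statistics buffer, four pages per sub-CRQ, and
 * every long term rx buffer, all rounded to IOMMU page granularity.
 */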
3526 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3527 {
3528 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3529 struct ibmvnic_adapter *adapter;
3530 struct iommu_table *tbl;
3531 unsigned long ret = 0;
3532 int i;
3534 tbl = get_iommu_table_base(&vdev->dev);
3536 /* netdev inits at probe time along with the structures we need below*/
3537 if (!netdev)
3538 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3540 adapter = netdev_priv(netdev);
3542 ret += PAGE_SIZE; /* the crq message queue */
3543 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3545 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3546 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3548 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3549 i++)
3550 ret += adapter->rx_pool[i].size *
3551 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3553 return ret;
3554 }
3556 static int ibmvnic_resume(struct device *dev)
3557 {
3558 struct net_device *netdev = dev_get_drvdata(dev);
3559 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3560 int i;
3562 /* kick the interrupt handlers just in case we lost an interrupt */
3563 for (i = 0; i < adapter->req_rx_queues; i++)
3564 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3565 adapter->rx_scrq[i]);
3567 return 0;
3568 }
3570 static struct vio_device_id ibmvnic_device_table[] = {
3571 {"network", "IBM,vnic"},
3572 {"", "" }
3573 };
3574 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3576 static const struct dev_pm_ops ibmvnic_pm_ops = {
3577 .resume = ibmvnic_resume
3578 };
3580 static struct vio_driver ibmvnic_driver = {
3581 .id_table = ibmvnic_device_table,
3582 .probe = ibmvnic_probe,
3583 .remove = ibmvnic_remove,
3584 .get_desired_dma = ibmvnic_get_desired_dma,
3585 .name = ibmvnic_driver_name,
3586 .pm = &ibmvnic_pm_ops,
3587 };
3589 /* module functions */
3590 static int __init ibmvnic_module_init(void)
3591 {
3592 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3593 IBMVNIC_DRIVER_VERSION);
3595 return vio_register_driver(&ibmvnic_driver);
3596 }
3598 static void __exit ibmvnic_module_exit(void)
3599 {
3600 vio_unregister_driver(&ibmvnic_driver);
3601 }
3603 module_init(ibmvnic_module_init);
3604 module_exit(ibmvnic_module_exit);