/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                   */
/* Copyright (C) 2014 IBM Corp.                                          */
/* Santiago Leon (santi_leon@yahoo.com)                                  */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/* John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify  */
/* it under the terms of the GNU General Public License as published by  */
/* the Free Software Foundation; either version 2 of the License, or     */
/* (at your option) any later version.                                   */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,       */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/* GNU General Public License for more details.                          */
/*                                                                        */
/* You should have received a copy of the GNU General Public License     */
/* along with this program; if not, see <http://www.gnu.org/licenses/>.  */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet. Subsequently, sCRQs are used by the server to notify the    */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This means that large, contiguous DMA-mapped     */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"
80 static const char ibmvnic_driver_name[] = "ibmvnic";
81 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83 MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
84 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
85 MODULE_LICENSE("GPL");
86 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
89 static int ibmvnic_remove(struct vio_dev *);
90 static void release_sub_crqs(struct ibmvnic_adapter *);
91 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
92 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
93 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
94 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
95 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
96 union sub_crq *sub_crq);
97 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
98 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
99 static int enable_scrq_irq(struct ibmvnic_adapter *,
100 struct ibmvnic_sub_crq_queue *);
101 static int disable_scrq_irq(struct ibmvnic_adapter *,
102 struct ibmvnic_sub_crq_queue *);
103 static int pending_scrq(struct ibmvnic_adapter *,
104 struct ibmvnic_sub_crq_queue *);
105 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
106 struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
108 static void send_map_query(struct ibmvnic_adapter *adapter);
109 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
110 static void send_request_unmap(struct ibmvnic_adapter *, u8);
111 static void send_login(struct ibmvnic_adapter *adapter);
112 static void send_cap_queries(struct ibmvnic_adapter *adapter);
113 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
114 static int ibmvnic_init(struct ibmvnic_adapter *);
115 static void release_crq_queue(struct ibmvnic_adapter *);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};
122 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
123 offsetof(struct ibmvnic_statistics, stat))
124 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
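/* Each entry below pairs an ethtool statistic name with the offset of the
 * corresponding counter inside struct ibmvnic_adapter, letting
 * ibmvnic_get_ethtool_stats() fetch every value through the generic
 * IBMVNIC_GET_STAT() pointer arithmetic above.
 */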
126 static const struct ibmvnic_stat ibmvnic_stats[] = {
127 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
128 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
129 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
130 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
131 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
132 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
133 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
134 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
135 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
136 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
137 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
138 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
139 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
140 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
141 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
142 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
143 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
144 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
145 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
146 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
147 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
148 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
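/* Release a long term buffer.  The unmap request to the server is skipped
 * during failover, when the server side can no longer service it.
 */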
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
			       off_rxadd_subcrqs));
	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}
230 index = pool->free_map[pool->next_free];
232 if (pool->rx_buff[index].skb)
233 dev_err(dev, "Inconsistent free_map!\n");
235 /* Copy the skb to the long term mapped DMA buffer */
236 offset = index * pool->buff_size;
237 dst = pool->long_term_buff.buff + offset;
238 memset(dst, 0, pool->buff_size);
239 dma_addr = pool->long_term_buff.addr + offset;
240 pool->rx_buff[index].data = dst;
242 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
243 pool->rx_buff[index].dma = dma_addr;
244 pool->rx_buff[index].skb = skb;
245 pool->rx_buff[index].pool_index = pool->index;
246 pool->rx_buff[index].size = pool->buff_size;
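		/* Build an rx-add descriptor for this slot of the long term
		 * buffer.  The correlator carries the rx_buff pointer so the
		 * rx completion can hand the same entry back to us.
		 */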
248 memset(&sub_crq, 0, sizeof(sub_crq));
249 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
250 sub_crq.rx_add.correlator =
251 cpu_to_be64((u64)&pool->rx_buff[index]);
252 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
253 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * discarded.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
345 for (i = 0; i < rx_scrqs; i++) {
346 rx_pool = &adapter->rx_pool[i];
348 kfree(rx_pool->free_map);
349 free_long_term_buff(adapter, &rx_pool->long_term_buff);
		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}
390 for (i = 0; i < rxadd_subcrqs; i++) {
391 rx_pool = &adapter->rx_pool[i];
393 netdev_dbg(adapter->netdev,
394 "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
395 i, adapter->req_rx_add_entries_per_subcrq,
396 be64_to_cpu(size_array[i]));
398 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
400 rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}
425 for (j = 0; j < rx_pool->size; ++j)
426 rx_pool->free_map[j] = j;
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
445 for (i = 0; i < tx_scrqs; i++) {
446 tx_pool = &adapter->tx_pool[i];
447 kfree(tx_pool->tx_buff);
448 free_long_term_buff(adapter, &tx_pool->long_term_buff);
449 kfree(tx_pool->free_map);
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;
470 for (i = 0; i < tx_subcrqs; i++) {
471 tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}
495 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
496 tx_pool->free_map[j] = j;
		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}
static void release_bounce_buffer(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->bounce_buffer)
		return;

	if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dma_unmap_single(dev, adapter->bounce_buffer_dma,
				 adapter->bounce_buffer_size,
				 DMA_TO_DEVICE);
		adapter->bounce_buffer_dma = DMA_ERROR_CODE;
	}

	kfree(adapter->bounce_buffer);
	adapter->bounce_buffer = NULL;
}
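/* The bounce buffer is allocated up front as a transmit fallback area.  In
 * this version of the driver ibmvnic_xmit() copies directly into the long
 * term buffer and used_bounce always stays false, so the buffer is only
 * ever mapped and later freed.
 */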
static int init_bounce_buffer(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	char *buf;
	int buf_sz;
	dma_addr_t map_addr;

	/* Round the buffer up to a whole number of pages */
	buf_sz = ((netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
	buf = kmalloc(buf_sz, GFP_KERNEL);
	if (!buf)
		return -1;

	map_addr = dma_map_single(dev, buf, buf_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, map_addr)) {
		dev_err(dev, "Couldn't map bounce buffer\n");
		kfree(buf);
		return -1;
	}

	adapter->bounce_buffer = buf;
	adapter->bounce_buffer_size = buf_sz;
	adapter->bounce_buffer_dma = map_addr;
	return 0;
}
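/* Log in to the VNIC server.  If the server requests renegotiation, the
 * sub-CRQs are released and the capability exchange is redone before the
 * login is retried, until the server accepts it.
 */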
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}
static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_bounce_buffer(adapter);
	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	release_stats_token(adapter);
}
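/* Bring the interface up: (re)initialize and log in to the server, set up
 * sub-CRQ interrupts and NAPI, allocate the long term rx/tx pools, prime
 * the rx queues, then report a logical link up and start the tx queues.
 */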
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int rc = 0;
	int i;

	if (adapter->is_closed) {
		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;
	}

	rc = ibmvnic_login(netdev);
	if (rc)
		return rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		dev_err(dev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "failed to initialize sub crq irqs\n");
		return rc;
	}
	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto ibmvnic_open_fail;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
634 send_map_query(adapter);
	rc = init_rx_pools(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	rc = init_tx_pools(netdev);
	if (rc)
		goto ibmvnic_open_fail;

	rc = init_bounce_buffer(netdev);
	if (rc)
		goto ibmvnic_open_fail;
648 replenish_pools(adapter);
650 /* We're ready to receive frames, enable the sub-crq interrupts and
651 * set the logical link state to up
653 for (i = 0; i < adapter->req_rx_queues; i++)
654 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
656 for (i = 0; i < adapter->req_tx_queues; i++)
657 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
659 memset(&crq, 0, sizeof(crq));
660 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
661 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
662 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
663 ibmvnic_send_crq(adapter, &crq);
	netif_tx_start_all_queues(netdev);
	adapter->is_closed = false;

	return 0;

ibmvnic_open_fail:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
	release_resources(adapter);
	return -ENOMEM;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int i;
683 adapter->closing = true;
685 for (i = 0; i < adapter->req_rx_queues; i++)
686 napi_disable(&adapter->napi[i]);
688 if (!adapter->failover)
689 netif_tx_stop_all_queues(netdev);
691 memset(&crq, 0, sizeof(crq));
692 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
693 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
694 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
695 ibmvnic_send_crq(adapter, &crq);
	release_resources(adapter);

	adapter->is_closed = true;
	adapter->closing = false;

	return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
715 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
716 int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);
723 if (skb->protocol == htons(ETH_P_IP)) {
724 hdr_len[1] = ip_hdr(skb)->ihl * 4;
725 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
726 hdr_len[2] = tcp_hdrlen(skb);
727 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
728 hdr_len[2] = sizeof(struct udphdr);
729 } else if (skb->protocol == htons(ETH_P_IPV6)) {
730 hdr_len[1] = sizeof(struct ipv6hdr);
731 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
732 hdr_len[2] = tcp_hdrlen(skb);
733 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
734 hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}

	return len;
}
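/* hdr_field bit layout, as implied by the shifts above and in
 * ibmvnic_xmit(): bit 7 requests header descriptors at all, bit 6 the L2
 * header, bit 5 the L3 header and bit 4 the L4 header.
 */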
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
770 static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
771 union sub_crq *scrq_arr)
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
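/* Note the asymmetric capacities used above: the primary header descriptor
 * carries up to 24 bytes of header data and each header extension
 * descriptor up to 29 bytes.  build_hdr_descs_arr() below uses the same
 * 24/29 split to predict how many descriptors a header blob will occupy.
 */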
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
837 int queue_num = skb_get_queue_mapping(skb);
838 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
839 struct device *dev = &adapter->vdev->dev;
840 struct ibmvnic_tx_buff *tx_buff = NULL;
841 struct ibmvnic_sub_crq_queue *tx_scrq;
842 struct ibmvnic_tx_pool *tx_pool;
843 unsigned int tx_send_failed = 0;
844 unsigned int tx_map_failed = 0;
845 unsigned int tx_dropped = 0;
846 unsigned int tx_packets = 0;
847 unsigned int tx_bytes = 0;
848 dma_addr_t data_dma_addr;
849 struct netdev_queue *txq;
850 bool used_bounce = false;
851 unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;
860 tx_pool = &adapter->tx_pool[queue_num];
861 tx_scrq = adapter->tx_scrq[queue_num];
862 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
863 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
864 be32_to_cpu(adapter->login_rsp_buf->
865 off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
873 index = tx_pool->free_map[tx_pool->consumer_index];
874 offset = index * adapter->req_mtu;
875 dst = tx_pool->long_term_buff.buff + offset;
876 memset(dst, 0, adapter->req_mtu);
877 skb_copy_from_linear_data(skb, dst, skb->len);
878 data_dma_addr = tx_pool->long_term_buff.addr + offset;
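	/* Only the linear part of the skb is copied, on the assumption that
	 * the stack hands us unfragmented skbs (the driver does not
	 * advertise NETIF_F_SG).
	 */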
880 tx_pool->consumer_index =
881 (tx_pool->consumer_index + 1) %
882 adapter->req_tx_entries_per_subcrq;
884 tx_buff = &tx_pool->tx_buff[index];
886 tx_buff->data_dma[0] = data_dma_addr;
887 tx_buff->data_len[0] = skb->len;
888 tx_buff->index = index;
889 tx_buff->pool_index = queue_num;
890 tx_buff->last_frag = true;
891 tx_buff->used_bounce = used_bounce;
893 memset(&tx_crq, 0, sizeof(tx_crq));
894 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
895 tx_crq.v1.type = IBMVNIC_TX_DESC;
896 tx_crq.v1.n_crq_elem = 1;
898 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
899 tx_crq.v1.correlator = cpu_to_be32(index);
900 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
901 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
902 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
904 if (adapter->vlan_header_insertion) {
905 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
925 /* determine if l2/3/4 headers are sent to firmware */
926 if ((*hdrs >> 7) & 1 &&
927 (skb->protocol == htons(ETH_P_IP) ||
928 skb->protocol == htons(ETH_P_IPV6))) {
929 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
930 tx_crq.v1.n_crq_elem = num_entries;
931 tx_buff->indir_arr[0] = tx_crq;
932 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	if (atomic_inc_return(&tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
977 netdev->stats.tx_dropped += tx_dropped;
978 netdev->stats.tx_bytes += tx_bytes;
979 netdev->stats.tx_packets += tx_packets;
980 adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
989 struct netdev_hw_addr *ha;
990 union ibmvnic_crq crq;
992 memset(&crq, 0, sizeof(crq));
993 crq.request_capability.first = IBMVNIC_CRQ_CMD;
994 crq.request_capability.cmd = REQUEST_CAPABILITY;
	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	}

1000 if (netdev->flags & IFF_ALLMULTI) {
1001 /* Accept all multicast */
1002 memset(&crq, 0, sizeof(crq));
1003 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1004 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1005 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1006 ibmvnic_send_crq(adapter, &crq);
1007 } else if (netdev_mc_empty(netdev)) {
1008 /* Reject all multicast */
1009 memset(&crq, 0, sizeof(crq));
1010 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1011 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1012 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1013 ibmvnic_send_crq(adapter, &crq);
	} else {
		/* Accept one or more multicast(s) */
1016 netdev_for_each_mc_addr(ha, netdev) {
1017 memset(&crq, 0, sizeof(crq));
1018 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1019 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1020 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
			ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
					ha->addr);
			ibmvnic_send_crq(adapter, &crq);
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1032 struct sockaddr *addr = p;
1033 union ibmvnic_crq crq;
1035 if (!is_valid_ether_addr(addr->sa_data))
1036 return -EADDRNOTAVAIL;
1038 memset(&crq, 0, sizeof(crq));
1039 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1040 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1041 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1042 ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;
1052 /* Adapter timed out, resetting it */
1053 release_sub_crqs(adapter);
1054 rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1066 rx_buff->skb = NULL;
1068 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1069 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
	atomic_dec(&pool->available);
}
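/* NAPI poll: drain up to @budget rx completions from this queue's sub-CRQ,
 * refill the rx pool, and re-arm the interrupt only once the queue is
 * empty.  If a completion races in after the re-arm, NAPI is rescheduled
 * and the interrupt is turned back off.
 */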
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		int length;
		int offset;
		u8 flags = 0;
		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
			(struct ibmvnic_rx_buff *)be64_to_cpu(next->
							      rx_comp.correlator);
1095 /* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}
		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
1111 /* free the entry */
1112 next->rx_comp.first = 0;
1113 remove_buff_from_pool(adapter, rx_buff);
1115 skb_put(skb, length);
1116 skb->protocol = eth_type_trans(skb, netdev);
		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
1129 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
1143 #ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
1150 for (i = 0; i < adapter->req_rx_queues; i++)
1151 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
1156 static const struct net_device_ops ibmvnic_netdev_ops = {
1157 .ndo_open = ibmvnic_open,
1158 .ndo_stop = ibmvnic_close,
1159 .ndo_start_xmit = ibmvnic_xmit,
1160 .ndo_set_rx_mode = ibmvnic_set_multi,
1161 .ndo_set_mac_address = ibmvnic_set_mac,
1162 .ndo_validate_addr = eth_validate_addr,
1163 .ndo_tx_timeout = ibmvnic_tx_timeout,
1164 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
1169 /* ethtool functions */
1171 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1172 struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
1180 cmd->base.speed = SPEED_1000;
1181 cmd->base.duplex = DUPLEX_FULL;
1182 cmd->base.port = PORT_FIBRE;
1183 cmd->base.phy_address = 0;
1184 cmd->base.autoneg = AUTONEG_ENABLE;
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
1194 static void ibmvnic_get_drvinfo(struct net_device *dev,
1195 struct ethtool_drvinfo *info)
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}
1201 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1203 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}
1208 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1210 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
1215 static u32 ibmvnic_get_link(struct net_device *netdev)
1217 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1219 /* Don't need to send a query because we request a logical link up at
1220 * init and then we wait for link state indications
	return adapter->logical_link_state;
}
1225 static void ibmvnic_get_ringparam(struct net_device *netdev,
1226 struct ethtool_ringparam *ring)
1228 ring->rx_max_pending = 0;
1229 ring->tx_max_pending = 0;
1230 ring->rx_mini_max_pending = 0;
1231 ring->rx_jumbo_max_pending = 0;
1232 ring->rx_pending = 0;
1233 ring->tx_pending = 0;
1234 ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}
1259 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1260 struct ethtool_stats *stats, u64 *data)
1262 struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;
1266 memset(&crq, 0, sizeof(crq));
1267 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1268 crq.request_statistics.cmd = REQUEST_STATISTICS;
1269 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1270 crq.request_statistics.len =
1271 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1273 /* Wait for data to be written */
1274 init_completion(&adapter->stats_done);
1275 ibmvnic_send_crq(adapter, &crq);
1276 wait_for_completion(&adapter->stats_done);
1278 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
1282 static const struct ethtool_ops ibmvnic_ethtool_ops = {
1283 .get_drvinfo = ibmvnic_get_drvinfo,
1284 .get_msglevel = ibmvnic_get_msglevel,
1285 .set_msglevel = ibmvnic_set_msglevel,
1286 .get_link = ibmvnic_get_link,
1287 .get_ringparam = ibmvnic_get_ringparam,
1288 .get_strings = ibmvnic_get_strings,
1289 .get_sset_count = ibmvnic_get_sset_count,
1290 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
1294 /* Routines for managing CRQs/sCRQs */
1296 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1297 struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

1302 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}
1348 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1349 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}
1361 scrq->adapter = adapter;
1362 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1364 atomic_set(&scrq->used, 0);
1365 scrq->rx_skb_top = NULL;
1366 spin_lock_init(&scrq->lock);
1368 netdev_dbg(adapter->netdev,
1369 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1370 scrq->crq_num, scrq->hw_irq, scrq->irq);
	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
1385 static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1389 if (adapter->tx_scrq) {
1390 for (i = 0; i < adapter->req_tx_queues; i++) {
1391 if (!adapter->tx_scrq[i])
1394 if (adapter->tx_scrq[i]->irq) {
1395 free_irq(adapter->tx_scrq[i]->irq,
1396 adapter->tx_scrq[i]);
1397 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
1398 adapter->tx_scrq[i]->irq = 0;
1401 release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
1404 kfree(adapter->tx_scrq);
1405 adapter->tx_scrq = NULL;
1408 if (adapter->rx_scrq) {
1409 for (i = 0; i < adapter->req_rx_queues; i++) {
1410 if (!adapter->rx_scrq[i])
1413 if (adapter->rx_scrq[i]->irq) {
1414 free_irq(adapter->rx_scrq[i]->irq,
1415 adapter->rx_scrq[i]);
1416 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
1417 adapter->rx_scrq[i]->irq = 0;
1420 release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
1423 kfree(adapter->rx_scrq);
1424 adapter->rx_scrq = NULL;
1428 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1429 struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

1434 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1435 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);

	return rc;
}
1442 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1443 struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}
1453 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1454 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);

	return rc;
}
1461 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1462 struct ibmvnic_sub_crq_queue *scrq)
1464 struct device *dev = &adapter->vdev->dev;
1465 struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;
restart_loop:
	while (pending_scrq(adapter, scrq)) {
1473 unsigned int pool = scrq->pool_index;
1475 next = ibmvnic_next_scrq(adapter, scrq);
1476 for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
1482 index = be32_to_cpu(next->tx_comp.correlators[i]);
1483 txbuff = &adapter->tx_pool[pool].tx_buff[index];
			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
1492 /* if sub_crq was sent indirectly */
1493 first = txbuff->indir_arr[0].generic.first;
1494 if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				if (atomic_sub_return(next->tx_comp.num_comps,
						      &scrq->used) <=
				    (adapter->req_tx_entries_per_subcrq / 2) &&
				    netif_subqueue_stopped(adapter->netdev,
							   txbuff->skb)) {
					netif_wake_subqueue(adapter->netdev,
							    scrq->pool_index);
					netdev_dbg(adapter->netdev,
						   "Started queue %d\n",
						   scrq->pool_index);
				}

				dev_kfree_skb_any(txbuff->skb);
			}
1516 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1517 producer_index] = index;
1518 adapter->tx_pool[pool].producer_index =
1519 (adapter->tx_pool[pool].producer_index + 1) %
1520 adapter->req_tx_entries_per_subcrq;
1522 /* remove tx_comp scrq*/
			next->tx_comp.first = 0;
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
1536 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1538 struct ibmvnic_sub_crq_queue *scrq = instance;
1539 struct ibmvnic_adapter *adapter = scrq->adapter;
	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}
1547 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1549 struct ibmvnic_sub_crq_queue *scrq = instance;
1550 struct ibmvnic_adapter *adapter = scrq->adapter;
	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
1560 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1562 struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
1568 scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}
1588 for (i = 0; i < adapter->req_rx_queues; i++) {
1589 scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
1596 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1597 0, "ibmvnic_rx", scrq);
1599 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1601 irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}
1622 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1624 struct device *dev = &adapter->vdev->dev;
1625 struct ibmvnic_sub_crq_queue **allqueues;
1626 int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

1633 /* Sub-CRQ entries are 32 byte long */
1634 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1636 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1637 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1638 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1639 goto allqueues_failed;
1642 /* Get the minimum between the queried max and the entries
	 * that fit in our PAGE_SIZE
	 */
1645 adapter->req_tx_entries_per_subcrq =
1646 adapter->max_tx_entries_per_subcrq > entries_page ?
1647 entries_page : adapter->max_tx_entries_per_subcrq;
1648 adapter->req_rx_add_entries_per_subcrq =
1649 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1650 entries_page : adapter->max_rx_add_entries_per_subcrq;
1652 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1653 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1654 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1656 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1659 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;
1665 for (i = 0; i < total_queues; i++) {
1666 allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}
	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}
1700 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1701 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;
1705 for (i = 0; i < adapter->req_tx_queues; i++) {
1706 adapter->tx_scrq[i] = allqueues[i];
1707 adapter->tx_scrq[i]->pool_index = i;
1710 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1711 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;
1715 for (i = 0; i < adapter->req_rx_queues; i++) {
1716 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1717 adapter->rx_scrq[i]->scrq_num = i;
1720 memset(&crq, 0, sizeof(crq));
1721 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1722 crq.request_capability.cmd = REQUEST_CAPABILITY;
1724 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1725 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1726 atomic_inc(&adapter->running_cap_crqs);
1727 ibmvnic_send_crq(adapter, &crq);
1729 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1730 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1731 atomic_inc(&adapter->running_cap_crqs);
1732 ibmvnic_send_crq(adapter, &crq);
1734 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1735 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1736 atomic_inc(&adapter->running_cap_crqs);
1737 ibmvnic_send_crq(adapter, &crq);
1739 crq.request_capability.capability =
1740 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1741 crq.request_capability.number =
1742 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1743 atomic_inc(&adapter->running_cap_crqs);
1744 ibmvnic_send_crq(adapter, &crq);
1746 crq.request_capability.capability =
1747 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1748 crq.request_capability.number =
1749 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1750 atomic_inc(&adapter->running_cap_crqs);
1751 ibmvnic_send_crq(adapter, &crq);
1753 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1754 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1755 atomic_inc(&adapter->running_cap_crqs);
1756 ibmvnic_send_crq(adapter, &crq);
1758 if (adapter->netdev->flags & IFF_PROMISC) {
1759 if (adapter->promisc_supported) {
1760 crq.request_capability.capability =
1761 cpu_to_be16(PROMISC_REQUESTED);
1762 crq.request_capability.number = cpu_to_be64(1);
1763 atomic_inc(&adapter->running_cap_crqs);
1764 ibmvnic_send_crq(adapter, &crq);
1767 crq.request_capability.capability =
1768 cpu_to_be16(PROMISC_REQUESTED);
1769 crq.request_capability.number = cpu_to_be64(0);
1770 atomic_inc(&adapter->running_cap_crqs);
1771 ibmvnic_send_crq(adapter, &crq);
	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
1800 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1801 struct ibmvnic_sub_crq_queue *scrq)
1803 union sub_crq *entry;
1804 unsigned long flags;
1806 spin_lock_irqsave(&scrq->lock, flags);
1807 entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
1819 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1821 struct ibmvnic_crq_queue *queue = &adapter->crq;
1822 union ibmvnic_crq *crq;
1824 crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
1835 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1836 union sub_crq *sub_crq)
1838 unsigned int ua = adapter->vdev->unit_address;
1839 struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;
1843 netdev_dbg(adapter->netdev,
1844 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1845 (unsigned long int)cpu_to_be64(remote_handle),
1846 (unsigned long int)cpu_to_be64(u64_crq[0]),
1847 (unsigned long int)cpu_to_be64(u64_crq[1]),
1848 (unsigned long int)cpu_to_be64(u64_crq[2]),
1849 (unsigned long int)cpu_to_be64(u64_crq[3]));
	/* Make sure the hypervisor sees the complete request */
	mb();

1854 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1855 cpu_to_be64(remote_handle),
1856 cpu_to_be64(u64_crq[0]),
1857 cpu_to_be64(u64_crq[1]),
1858 cpu_to_be64(u64_crq[2]),
1859 cpu_to_be64(u64_crq[3]));
	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
1870 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1871 u64 remote_handle, u64 ioba, u64 num_entries)
1873 unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;
	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);
	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
1892 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1893 union ibmvnic_crq *crq)
1895 unsigned int ua = adapter->vdev->unit_address;
1896 struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;
1900 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1901 (unsigned long int)cpu_to_be64(u64_crq[0]),
1902 (unsigned long int)cpu_to_be64(u64_crq[1]));
	/* Make sure the hypervisor sees the complete request */
	mb();

1907 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1908 cpu_to_be64(u64_crq[0]),
1909 cpu_to_be64(u64_crq[1]));
	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
1920 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1922 union ibmvnic_crq crq;
1924 memset(&crq, 0, sizeof(crq));
1925 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1926 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1927 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
	return ibmvnic_send_crq(adapter, &crq);
}
1932 static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1934 union ibmvnic_crq crq;
1936 memset(&crq, 0, sizeof(crq));
1937 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1938 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1939 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
	return ibmvnic_send_crq(adapter, &crq);
}
1944 static int send_version_xchg(struct ibmvnic_adapter *adapter)
1946 union ibmvnic_crq crq;
1948 memset(&crq, 0, sizeof(crq));
1949 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1950 crq.version_exchange.cmd = VERSION_EXCHANGE;
1951 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
	return ibmvnic_send_crq(adapter, &crq);
}
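/* The login buffer tells the server which tx/rx sub-CRQ counts were settled
 * on and where the lists of queue handles live; the response buffer the
 * server fills in carries, among other things, the per-queue handles later
 * used as the remote_handle argument of send_subcrq().
 */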
1956 static void send_login(struct ibmvnic_adapter *adapter)
1958 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1959 struct ibmvnic_login_buffer *login_buffer;
1960 struct ibmvnic_inflight_cmd *inflight_cmd;
1961 struct device *dev = &adapter->vdev->dev;
1962 dma_addr_t rsp_buffer_token;
1963 dma_addr_t buffer_token;
1964 size_t rsp_buffer_size;
1965 union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p, *rx_list_p;
	int i;

	buffer_size =
		sizeof(struct ibmvnic_login_buffer) +
		sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;
1980 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1982 if (dma_mapping_error(dev, buffer_token)) {
1983 dev_err(dev, "Couldn't map login buffer\n");
1984 goto buf_map_failed;
1987 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1988 sizeof(u64) * adapter->req_tx_queues +
1989 sizeof(u64) * adapter->req_rx_queues +
1990 sizeof(u64) * adapter->req_rx_queues +
1991 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1993 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1994 if (!login_rsp_buffer)
1995 goto buf_rsp_alloc_failed;
1997 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1998 rsp_buffer_size, DMA_FROM_DEVICE);
1999 if (dma_mapping_error(dev, rsp_buffer_token)) {
2000 dev_err(dev, "Couldn't map login rsp buffer\n");
2001 goto buf_rsp_map_failed;
2003 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2004 if (!inflight_cmd) {
2005 dev_err(dev, "Couldn't allocate inflight_cmd\n");
2006 goto inflight_alloc_failed;
2008 adapter->login_buf = login_buffer;
2009 adapter->login_buf_token = buffer_token;
2010 adapter->login_buf_sz = buffer_size;
2011 adapter->login_rsp_buf = login_rsp_buffer;
2012 adapter->login_rsp_buf_token = rsp_buffer_token;
2013 adapter->login_rsp_buf_sz = rsp_buffer_size;
2015 login_buffer->len = cpu_to_be32(buffer_size);
2016 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
2017 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
2018 login_buffer->off_txcomp_subcrqs =
2019 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
2020 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
2021 login_buffer->off_rxcomp_subcrqs =
2022 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
2023 sizeof(u64) * adapter->req_tx_queues);
2024 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
2025 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
2027 tx_list_p = (__be64 *)((char *)login_buffer +
2028 sizeof(struct ibmvnic_login_buffer));
2029 rx_list_p = (__be64 *)((char *)login_buffer +
2030 sizeof(struct ibmvnic_login_buffer) +
2031 sizeof(u64) * adapter->req_tx_queues);
	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}
2047 netdev_dbg(adapter->netdev, "Login Buffer:\n");
2048 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
2049 netdev_dbg(adapter->netdev, "%016lx\n",
2050 ((unsigned long int *)(adapter->login_buf))[i]);
2053 memset(&crq, 0, sizeof(crq));
2054 crq.login.first = IBMVNIC_CRQ_CMD;
2055 crq.login.cmd = LOGIN;
2056 crq.login.ioba = cpu_to_be32(buffer_token);
2057 crq.login.len = cpu_to_be32(buffer_size);
2059 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
2061 spin_lock_irqsave(&adapter->inflight_lock, flags);
2062 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2063 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2065 ibmvnic_send_crq(adapter, &crq);
	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
2082 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2085 union ibmvnic_crq crq;
2087 memset(&crq, 0, sizeof(crq));
2088 crq.request_map.first = IBMVNIC_CRQ_CMD;
2089 crq.request_map.cmd = REQUEST_MAP;
2090 crq.request_map.map_id = map_id;
2091 crq.request_map.ioba = cpu_to_be32(addr);
2092 crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}
2096 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2098 union ibmvnic_crq crq;
2100 memset(&crq, 0, sizeof(crq));
2101 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2102 crq.request_unmap.cmd = REQUEST_UNMAP;
2103 crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}
2107 static void send_map_query(struct ibmvnic_adapter *adapter)
2109 union ibmvnic_crq crq;
2111 memset(&crq, 0, sizeof(crq));
2112 crq.query_map.first = IBMVNIC_CRQ_CMD;
2113 crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

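/* The server has returned its IP offload capabilities.  Dump them, select
 * the checksum features the stack can use, and send CONTROL_IP_OFFLOAD to
 * enable the chosen subset.
 */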
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev, &adapter->ip_offload_ctrl,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
		cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

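/* Detailed data for a previously reported error has arrived.  Find the
 * matching buffer on the adapter error list, dump its contents, then
 * unmap and free it.
 */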
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));
	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

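/* The server raised an error indication.  Log it, then allocate and map a
 * buffer and send REQUEST_ERROR_INFO so the detailed error data can be
 * retrieved into it.
 */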
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.
		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

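/* A requested capability has been acknowledged.  PARTIALSUCCESS means the
 * server granted less than was asked for; accept the server's value and
 * retry sub-CRQ initialization.  Once the last outstanding response
 * arrives, move on to querying IP offload support.
 */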
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
			&adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
			cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

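/* A non-zero login return code means the server could not allocate the
 * requested number of queues; flag a renegotiation so the login buffer is
 * resent with smaller requests.
 */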
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

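/* If the long term buffer registration failed, back out the map_id that
 * was assigned to it so the buffer is not used.
 */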
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		/* We're done querying the capabilities, initialize sub-crqs */
		adapter->wait_capability = false;
		init_sub_crqs(adapter, 0);
	}
}

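/* Drop all commands still waiting for a response, releasing any buffers
 * that were mapped on their behalf.  Called when the CRQ transport drops,
 * since the outstanding responses will never arrive.
 */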
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}

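/* Transport event work: after a partition migration the CRQ must be
 * re-enabled and the initialization handshake restarted.
 */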
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	ibmvnic_free_inflight(adapter);
	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}

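/* Dispatch a single CRQ message: initialization handshakes and transport
 * events are handled inline, command responses are routed to their
 * handlers.
 */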
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
				be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

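/* CRQ interrupt: mask further interrupts and defer processing to the
 * tasklet.
 */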
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	unsigned long flags;

	spin_lock_irqsave(&adapter->crq.lock, flags);
	vio_disable_interrupts(adapter->vdev);
	tasklet_schedule(&adapter->tasklet);
	spin_unlock_irqrestore(&adapter->crq.lock, flags);
	return IRQ_HANDLED;
}

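/* Drain the CRQ, re-checking once after interrupts are re-enabled to close
 * the race with a message that arrives in between.
 */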
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			/* remain in tasklet until all
			 * capabilities responses are received
			 */
			if (!adapter->wait_capability)
				done = true;
		}
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

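/* Close and re-register the CRQ with the hypervisor, clearing any stale
 * messages left in the queue page.
 */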
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

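/* Allocate the CRQ message page, register it with the hypervisor, and wire
 * up the interrupt and tasklet used to service it.
 */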
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}
	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

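/* Work item run when the partner initiates the CRQ handshake, either at
 * boot or after a failover; redrives the version exchange and, on
 * failover, reopens the device.
 */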
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");
	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}
	rc = init_stats_token(adapter);
	if (rc) {
		release_crq_queue(adapter);
		return rc;
	}
	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -1;
	}
	return 0;
}

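/* Probe: read the MAC address from the device tree, allocate the netdev,
 * run the initialization handshake, then register with the network stack.
 */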
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	adapter->is_closed = false;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

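/* Report the IO entitlement the device would like: enough to map the CRQ
 * page, bounce buffer, statistics buffer, sub-CRQ queues, and rx buffer
 * pools.
 */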
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);