/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA-mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/* net_device_ops functions */
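/* Record the geometry of an rx pool (number of buffers, buffer size,
 * owning index) before its memory is allocated by alloc_rx_pool().
 */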
static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}
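/* Allocate one of the large, long term DMA buffers described in the
 * header comment and register it with the VNIC server: the buffer is
 * mapped once via REQUEST_MAP and then reused for the life of the
 * adapter instead of being remapped per packet.
 */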
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}
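/* Allocate the free map, buffer tracking array, and backing long term
 * buffer for an rx pool whose geometry was set by init_rx_pool().
 */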
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);

	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}
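/* Top up an rx pool: for every free slot, allocate an skb, stage it in
 * the long term buffer, and hand the slot to the VNIC server with an
 * rx_add sub-CRQ so a received packet can be placed there.
 */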
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}
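/* Log in to the VNIC server, renegotiating capabilities first when the
 * server has requested it. Both the capability exchange and the LOGIN
 * CRQ are asynchronous, so each step waits on init_done with a timeout.
 */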
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}
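/* Bring the interface up: log in, set up sub-CRQ interrupts, allocate
 * NAPI contexts, rx/tx pools and the tx bounce buffer, replenish the rx
 * pools, and finally request a logical link up from the server.
 */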
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int rc = 0;
	int i, j;

	if (adapter->is_closed) {
		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;
	}

	rc = ibmvnic_login(netdev);
	if (rc)
		return rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		dev_err(dev, "failed to set the number of tx queues\n");
		return -1;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}

	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     adapter->req_rx_add_entries_per_subcrq, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}

	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->req_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->req_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);
	adapter->is_closed = false;

	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	release_sub_crqs(adapter);
	return -ENOMEM;
}
static void ibmvnic_release_resources(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	int tx_scrqs, rx_scrqs;
	int i;

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		struct ibmvnic_tx_pool *tx_pool = &adapter->tx_pool[i];

		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		struct ibmvnic_rx_pool *rx_pool = &adapter->rx_pool[i];

		free_rx_pool(adapter, rx_pool);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	if (adapter->stats_token)
		dma_unmap_single(dev, adapter->stats_token,
				 sizeof(struct ibmvnic_statistics),
				 DMA_FROM_DEVICE);
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	ibmvnic_release_resources(adapter);

	adapter->is_closed = true;
	adapter->closing = false;

	return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to be filled in
 * @hdr_data - buffer the header data is written into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 * Returns the total length of the header data.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}

	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer and header data
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
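/* Transmit path. The skb's linear data is copied into the tx pool's
 * long term buffer (no per-packet DMA mapping) and described to the
 * VNIC server with a v1 TX descriptor; when L2/L3/L4 headers must be
 * passed to firmware, an indirect descriptor array is used instead.
 */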
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	atomic_inc(&tx_scrq->used);

	if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
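/* NAPI poll handler: drain rx completion entries up to @budget, pushing
 * each completed frame up the stack with napi_gro_receive() and
 * recycling its pool slot, then replenish the pool and re-enable the
 * queue's interrupt if the budget was not exhausted.
 */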
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */
static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
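/* Allocate and register one sub-CRQ: four pages of message slots are
 * DMA mapped and handed to the hypervisor via H_REG_SUB_CRQ, which
 * returns the queue number and interrupt source stored in the scrq.
 */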
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	atomic_set(&scrq->used, 0);
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
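/* Process tx completion entries from a sub-CRQ: unmap any indirect
 * descriptor array, free the completed skb, return the slot to the tx
 * pool's free map, and wake the subqueue if it was stopped and enough
 * entries have drained.
 */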
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				atomic_dec(&scrq->used);

				if (atomic_read(&scrq->used) <=
				    (adapter->req_tx_entries_per_subcrq / 2) &&
				    netif_subqueue_stopped(adapter->netdev,
							   txbuff->skb)) {
					netif_wake_subqueue(adapter->netdev,
							    scrq->pool_index);
					netdev_dbg(adapter->netdev,
						   "Started queue %d\n",
						   scrq->pool_index);
				}

				dev_kfree_skb_any(txbuff->skb);
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
				adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}
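/* Size and allocate the sub-CRQs after capabilities have been queried,
 * then tell the VNIC server how many queues and entries were settled on
 * with a series of REQUEST_CAPABILITY CRQs. If not all queues could be
 * registered, the shortfall is distributed between tx and rx.
 */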
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
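/* Hand one descriptor to the VNIC server with the H_SEND_SUB_CRQ hcall;
 * a memory barrier before the hcall ensures the hypervisor observes the
 * fully built descriptor.
 */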
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
1873 union ibmvnic_crq crq;
1875 memset(&crq, 0, sizeof(crq));
1876 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1877 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1878 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1880 return ibmvnic_send_crq(adapter, &crq);
1883 static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1885 union ibmvnic_crq crq;
1887 memset(&crq, 0, sizeof(crq));
1888 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1889 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1890 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1892 return ibmvnic_send_crq(adapter, &crq);
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
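/* Build and send the LOGIN CRQ. The login buffer carries the lists of
 * tx and rx sub-CRQ numbers being committed; the response buffer is
 * mapped up front so the server can fill it in asynchronously, and the
 * command is tracked on the inflight list for later cleanup.
 */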
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     __be32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
		cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

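/* Every query above increments running_cap_crqs before the CRQ is sent;
 * handle_query_cap_rsp() decrements it as each response arrives and only
 * starts sub-CRQ initialization once the counter drops back to zero. The
 * same counter is what ibmvnic_tasklet() checks to decide whether it must
 * stay resident until all capability responses have been drained.
 */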
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev, &adapter->ip_offload_ctrl,
			       sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
		cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

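/* Error reporting is a three-step exchange handled by the next two
 * functions: the server raises an ERROR_INDICATION, the driver maps a
 * detail buffer and requests the specifics with REQUEST_ERROR_INFO, and the
 * server's REQUEST_ERROR_RSP is matched back to that buffer by error_id.
 * Sketch of the flow (illustrative):
 *
 *	server -> ERROR_INDICATION(error_id, detail_error_sz)
 *	driver: kmalloc + dma_map a detail buffer, queue on adapter->errors
 *	driver -> REQUEST_ERROR_INFO(ioba, len, error_id)
 *	server: DMA-writes the detail, sends REQUEST_ERROR_RSP(error_id, rc)
 *	driver: look up the buffer by error_id, dump it, unmap and free it
 */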
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));
	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
			"FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	memcpy(&inflight_cmd->crq, crq, sizeof(*crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

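/* A REQUEST_CAPABILITY response of PARTIALSUCCESS means the server could
 * not grant the value the driver asked for but reports what it can support.
 * The handler below adopts the server's number, tears down the sub-CRQs,
 * and retries the whole request pass; only when every outstanding request
 * has succeeded does it move on to querying IP offload support.
 */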
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs_no_irqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
			&adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
			cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

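/* map_ids identify long term buffers registered with send_request_map().
 * If the server rejects a mapping, the handler below walks the tx/rx pools
 * and zeroes the matching map_id so a buffer the server never accepted is
 * not unmapped later with a stale id.
 */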
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		/* We're done querying the capabilities, initialize sub-crqs */
		init_sub_crqs(adapter, 0);
	}
}

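/* Commands whose payloads stay DMA-mapped until a response arrives (LOGIN,
 * REQUEST_ERROR_INFO) are tracked on adapter->inflight. If the transport
 * drops before the response shows up, ibmvnic_free_inflight() below is the
 * reclaim path that unmaps and frees those buffers; it runs from the
 * transport-event worker.
 */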
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}

static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	ibmvnic_free_inflight(adapter);
	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
				be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

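/* CRQ servicing is split between a minimal hard-irq handler and a tasklet:
 * the interrupt handler below only masks the VIO interrupt and schedules
 * ibmvnic_tasklet(), which does the actual dequeuing. Note the
 * drain / re-enable / re-check sequence in the tasklet: after re-enabling
 * interrupts it polls the queue once more, so a message that raced in
 * between the last dequeue and vio_enable_interrupts() is not stranded.
 */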
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	unsigned long flags;

	spin_lock_irqsave(&adapter->crq.lock, flags);
	vio_disable_interrupts(adapter->vdev);
	tasklet_schedule(&adapter->tasklet);
	spin_unlock_irqrestore(&adapter->crq.lock, flags);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			/* remain in tasklet until all
			 * capabilities responses are received
			 */
			if (!adapter->wait_capability)
				done = true;
		}
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

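/* The CRQ page itself is managed through hypervisor calls: H_REG_CRQ binds
 * the DMA-mapped page to this unit address, H_FREE_CRQ releases it, and
 * H_ENABLE_CRQ revives it after a partition migration. The bring-up order
 * implemented by init_crq_queue() below (sketch):
 *
 *	get_zeroed_page() -> dma_map_single() -> H_REG_CRQ
 *	(on H_RESOURCE, e.g. after kexec: H_FREE_CRQ then H_REG_CRQ again
 *	 via ibmvnic_reset_crq())
 *	tasklet_init() -> request_irq() -> vio_enable_interrupts()
 */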
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev, "failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");
	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

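/* ibmvnic_init() below is the active, driver-initiated half of the
 * handshake: register the CRQ, map the statistics buffer, send
 * IBMVNIC_CRQ_INIT, and wait up to 30 seconds for the partner to complete
 * adapter->init_done. handle_crq_init_rsp() above is the passive
 * counterpart, run from a workqueue when the server initiates instead.
 */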
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->stats_token = dma_map_single(dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->stats_token)) {
		release_crq_queue(adapter);
		dev_err(dev, "Couldn't map stats buffer\n");
		return -ENOMEM;
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -1;
	}

	return 0;
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	adapter->is_closed = false;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

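/* ibmvnic_get_desired_dma() estimates the IO entitlement to request from
 * the platform: one page for the CRQ, four pages per sub-CRQ, plus the
 * bounce buffer, statistics buffer, and rx pool buffers. As a worked
 * example (assuming 4 KiB pages and 4 tx + 4 rx queues), the queue portion
 * alone is 1 + 8 * 4 = 33 pages, i.e. ~132 KiB, before the buffer sizes
 * are added in.
 */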
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);