// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*									  */
/*  IBM System i and System p Virtual NIC Device Driver			  */
/*  Copyright (C) 2014 IBM Corp.					  */
/*  Santiago Leon (santi_leon@yahoo.com)				  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)				  */
/*  John Allen (jallen@linux.vnet.ibm.com)				  */
/*									  */
/*									  */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN	  */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*									  */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but	  */
/* are used by the driver to notify the server that a packet is	  */
/* ready for transmission or that a buffer has been added to receive a	  */
/* packet. Subsequently, sCRQs are used by the server to notify the	  */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.			  */
/*									  */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in	  */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use	  */
/* "long term mapping". This entails that large, continuous DMA mapped	  */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.	  */
/*									  */
/**************************************************************************/
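
/* Illustrative sketch (not part of the upstream driver): with long term
 * mapping, a pool buffer is located by pure arithmetic on the one-time
 * DMA mapping instead of a per-packet map/unmap. For a hypothetical pool
 * of 64 buffers of 2048 bytes each, buffer 3 is reached as:
 *
 *	offset   = index * pool->buff_size;            // 3 * 2048 = 6144
 *	dst      = pool->long_term_buff.buff + offset; // CPU address
 *	dma_addr = pool->long_term_buff.addr + offset; // device address
 *
 * See replenish_rx_pool() and ibmvnic_xmit() below for the real uses.
 */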
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
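
/* Example (illustrative only): the two macros combine to read a single
 * counter out of the device-provided statistics block by byte offset:
 *
 *	u64 rxp = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * which is how the ethtool stats path elsewhere in this driver walks the
 * ibmvnic_stats[] table below.
 */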
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
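
/* Typical usage pattern in this file, sketched for reference: callers
 * serialize on adapter->fw_lock, rearm the completion, fire a CRQ and
 * then bound the wait so a hung device cannot block forever:
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = ibmvnic_send_crq(adapter, &crq);	// or send_request_map()
 *	...
 *	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 *	mutex_unlock(&adapter->fw_lock);
 *
 * alloc_long_term_buff() below is the canonical example.
 */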
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle, &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
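
/* Worked example for the 24-bit length field above (illustrative
 * numbers): a buff_size of 0x600 shifted left by one byte becomes
 * 0x60000, and cpu_to_be32(0x60000) lays out as bytes 00 06 00 00, so
 * the 24-bit field sees 0x000600 with no byte truncated.
 */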
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 buff_size;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = buff_size;
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = buff_size;
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
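
/* Sizing note with illustrative numbers: each rx pool backs all of its
 * entries with a single long term buffer, so a pool sized by
 * req_rx_add_entries_per_subcrq = 256 with cur_rx_buf_sz = 2048 asks
 * alloc_long_term_buff() for one 512 KiB DMA-coherent allocation rather
 * than 256 per-packet mappings.
 */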
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}
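
/* Login outcome handling above, summarized: a timeout or an ABORTED
 * response simply retries (ABORTED after a 500 ms back-off);
 * PARTIALSUCCESS means the server lowered our requested capabilities,
 * so the sub-CRQs are rebuilt after a fresh capability query before
 * retrying; any other non-zero init_done_rc is a hard failure.
 */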
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/*
	 * If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	int rx_scrqs;
	int i, j;
	u64 rx_entries;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
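
/* Example layout (illustrative): for a TCP/IPv4 skb with all three
 * header levels requested, build_hdr_data() returns hdr_len = {14, 20,
 * 20} (minimal Ethernet, IPv4 and TCP headers) and hdr_data holds the
 * 54 header bytes back to back, which create_hdr_descs() below then
 * slices into descriptors.
 */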
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
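
/* Continuing the example above: 54 bytes of header data become three
 * descriptors under the limits coded here: 24 bytes in the initial
 * IBMVNIC_HDR_DESC, then 29 and 1 byte(s) in IBMVNIC_HDR_EXT_DESC
 * entries, so build_hdr_descs_arr() bumps num_entries by 3.
 */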
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;
	u64 handle;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle = tx_scrq->handle;

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle,
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle,
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array*/
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
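
/* Submission path recap (sketch of the logic above): a plain frame goes
 * out as a single sub-CRQ element via send_subcrq(); once header
 * descriptors are requested ((*hdrs >> 7) & 1), the descriptor array is
 * DMA-mapped just long enough for send_subcrq_indirect() to submit the
 * whole batch in one hypervisor call, then unmapped again.
 */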
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (adapter->state != VNIC_PROBED) {
		ether_addr_copy(adapter->mac_addr, addr->sa_data);
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
	}

	return rc;
}
/*
 * do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_rwi *rwi,
				 u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			goto out;
	}

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);

	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, true);
	if (rc) {
		rc = IBMVNIC_INIT_FAILED;
		goto out;
	}

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

out:
	if (rc)
		adapter->state = reset_state;
	return rc;
}
/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	rtnl_lock();
	/*
	 * Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		adapter->state = VNIC_CLOSING;

		/* Release the RTNL lock before link state change and
		 * re-acquire after the link state change to allow
		 * linkwatch_event to grab the RTNL lock and run during
		 * a reset.
		 */
		rtnl_unlock();
		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
		rtnl_lock();
		if (rc)
			goto out;

		if (adapter->state != VNIC_CLOSING) {
			rc = -1;
			goto out;
		}

		adapter->state = VNIC_CLOSED;
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
		call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
	}

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	rtnl_unlock();

	return rc;
}
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	return rc;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool saved_state = false;
	unsigned long flags;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	if (test_and_set_bit_lock(0, &adapter->resetting)) {
		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
				      IBMVNIC_RESET_DELAY);
		return;
	}

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (!saved_state) {
			reset_state = adapter->state;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			/* CHANGE_PARAM requestor holds rtnl_lock */
			rc = do_change_param_reset(adapter, rwi, reset_state);
		} else if (adapter->force_reset_recovery) {
			/*
			 * Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
			adapter->failover_pending = false;

			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
				adapter->from_passive_init)) {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc)
			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);

		rwi = get_next_rwi(adapter);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	clear_bit_unlock(0, &adapter->resetting);
}

static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;
	int ret;

	/*
	 * If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
	 */
	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	return -ret;
}
2343 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2345 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2347 if (test_bit(0, &adapter->resetting)) {
2348 netdev_err(adapter->netdev,
2349 "Adapter is resetting, skip timeout reset\n");
2353 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

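/* NAPI poll: drain up to @budget receive completions from this queue's
 * sub-CRQ, pass the skbs up through GRO, then replenish the rx pool
 * before re-enabling the queue's interrupt.
 */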
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

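/* Trigger a CHANGE_PARAM reset and wait for it to complete. If the newly
 * requested parameters are rejected, retry once with the saved fallback
 * values.
 */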
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters cannot handle packets with an
	 * MSS less than 224 or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications.
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	} else {
		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
	}

	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
		netdev_info(netdev,
			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    ring->rx_pending, ring->tx_pending,
			    adapter->req_rx_add_entries_per_subcrq,
			    adapter->req_tx_entries_per_subcrq);
	return ret;
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		channels->max_rx = adapter->max_rx_queues;
		channels->max_tx = adapter->max_tx_queues;
	} else {
		channels->max_rx = IBMVNIC_MAX_QUEUES;
		channels->max_tx = IBMVNIC_MAX_QUEUES;
	}

	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_queues != channels->rx_count ||
	     adapter->req_tx_queues != channels->tx_count))
		netdev_info(netdev,
			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    channels->rx_count, channels->tx_count,
			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
				i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}

static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
	.get_priv_flags		= ibmvnic_get_priv_flags,
	.set_priv_flags		= ibmvnic_set_priv_flags,
};

/* Routines for managing CRQs/sCRQs */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (!scrq) {
		netdev_dbg(adapter->netdev,
			   "Invalid scrq reset. NULL queue passed\n");
		return -EINVAL;
	}

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}
	if (scrq->msgs) {
		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
		atomic_set(&scrq->used, 0);
		scrq->cur = 0;
	} else {
		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
		return -EINVAL;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	if (!adapter->tx_scrq || !adapter->rx_scrq)
		return -EINVAL;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (test_bit(0, &adapter->resetting) &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = (0xff000000) | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI would fail with rc = H_FUNCTION when running
		 * in XIVE mode which is expected, but not an error.
		 */
		if (rc && (rc != H_FUNCTION))
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

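/* Reclaim completed tx descriptors: release the skb for each completion,
 * return the buffer index to the tx pool's free map, and wake the
 * subqueue once enough entries have drained.
 */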
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			num_entries += txbuff->num_entries;

			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

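/* Per-queue tx completion interrupt: mask the sub-CRQ interrupt and reap
 * completions directly in interrupt context.
 */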
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, scrq->name, scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}

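/* Allocate one pool of sub-CRQs and split it between tx and rx queues.
 * If fewer queues could be registered than requested, scale the tx/rx
 * requests down, but never below the server-advertised minimums.
 */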
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}

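/* Send REQUEST_CAPABILITY CRQs for queue counts, ring sizes, MTU and
 * promiscuous mode. On the first pass (!retry), the requested ring sizes
 * are clamped so that one long term mapped buffer never exceeds
 * IBMVNIC_MAX_LTB_SIZE.
 */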
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

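/* A sub-CRQ entry is valid once the server has toggled the CMD_RSP bit
 * in its first byte; pending_scrq() reports whether the current slot
 * holds such an entry.
 */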
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			/* do not reset, report the fail, wait for passive init from server */
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

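/* Send the CRQ INIT command, retrying for a while when the partner's
 * queue is still closed (H_CLOSED).
 */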
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int retries = 100;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	do {
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc != H_CLOSED)
			break;
		retries--;
		msleep(50);

	} while (retries > 0);

	if (rc) {
		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
		return rc;
	}

	return 0;
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}

static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}

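/* Build and send the LOGIN request: a DMA-mapped buffer carrying the tx/rx
 * sub-CRQ numbers and the client data strings, plus a response buffer for
 * the server to fill in.
 */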
static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
			cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}

static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}

static void send_query_map(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	adapter->ip_offload_tok =
		dma_map_single(dev,
			       &adapter->ip_offload_buf,
			       buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba =
	    cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}

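/* Program the checksum and TSO offloads reported by QUERY_IP_OFFLOAD and
 * update netdev->hw_features to match what the server supports.
 */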
static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev,
			       ctrl_buf,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	ctrl_buf->large_rx_ipv4 = 0;
	ctrl_buf->large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
			adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}

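/* Dump the offload capabilities returned by the server, then push the
 * chosen settings back with CONTROL_IP_OFFLOAD.
 */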
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	send_control_ip_offload(adapter);
}

static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

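/* The response carries the MAC address the server actually installed,
 * which may differ from the requested one; copy it back into the netdev
 * and adapter state.
 */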
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_query_ip_offload(adapter);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

4478 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4479 struct ibmvnic_adapter *adapter)
4481 struct net_device *netdev = adapter->netdev;
4482 struct device *dev = &adapter->vdev->dev;
4485 atomic_dec(&adapter->running_cap_crqs);
4486 netdev_dbg(netdev, "Outstanding queries: %d\n",
4487 atomic_read(&adapter->running_cap_crqs));
4488 rc = crq->query_capability.rc.code;
4490 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4494 switch (be16_to_cpu(crq->query_capability.capability)) {
4496 adapter->min_tx_queues =
4497 be64_to_cpu(crq->query_capability.number);
4498 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4499 adapter->min_tx_queues);
4502 adapter->min_rx_queues =
4503 be64_to_cpu(crq->query_capability.number);
4504 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4505 adapter->min_rx_queues);
4507 case MIN_RX_ADD_QUEUES:
4508 adapter->min_rx_add_queues =
4509 be64_to_cpu(crq->query_capability.number);
4510 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4511 adapter->min_rx_add_queues);
4514 adapter->max_tx_queues =
4515 be64_to_cpu(crq->query_capability.number);
4516 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4517 adapter->max_tx_queues);
4520 adapter->max_rx_queues =
4521 be64_to_cpu(crq->query_capability.number);
4522 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4523 adapter->max_rx_queues);
4525 case MAX_RX_ADD_QUEUES:
4526 adapter->max_rx_add_queues =
4527 be64_to_cpu(crq->query_capability.number);
4528 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4529 adapter->max_rx_add_queues);
4531 case MIN_TX_ENTRIES_PER_SUBCRQ:
4532 adapter->min_tx_entries_per_subcrq =
4533 be64_to_cpu(crq->query_capability.number);
4534 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4535 adapter->min_tx_entries_per_subcrq);
4537 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4538 adapter->min_rx_add_entries_per_subcrq =
4539 be64_to_cpu(crq->query_capability.number);
4540 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4541 adapter->min_rx_add_entries_per_subcrq);
4543 case MAX_TX_ENTRIES_PER_SUBCRQ:
4544 adapter->max_tx_entries_per_subcrq =
4545 be64_to_cpu(crq->query_capability.number);
4546 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4547 adapter->max_tx_entries_per_subcrq);
4549 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4550 adapter->max_rx_add_entries_per_subcrq =
4551 be64_to_cpu(crq->query_capability.number);
4552 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4553 adapter->max_rx_add_entries_per_subcrq);
4555 case TCP_IP_OFFLOAD:
4556 adapter->tcp_ip_offload =
4557 be64_to_cpu(crq->query_capability.number);
4558 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4559 adapter->tcp_ip_offload);
4561 case PROMISC_SUPPORTED:
4562 adapter->promisc_supported =
4563 be64_to_cpu(crq->query_capability.number);
4564 netdev_dbg(netdev, "promisc_supported = %lld\n",
4565 adapter->promisc_supported);
4568 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4569 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4570 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4573 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4574 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4575 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4577 case MAX_MULTICAST_FILTERS:
4578 adapter->max_multicast_filters =
4579 be64_to_cpu(crq->query_capability.number);
4580 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4581 adapter->max_multicast_filters);
4583 case VLAN_HEADER_INSERTION:
4584 adapter->vlan_header_insertion =
4585 be64_to_cpu(crq->query_capability.number);
4586 if (adapter->vlan_header_insertion)
4587 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4588 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4589 adapter->vlan_header_insertion);
4591 case RX_VLAN_HEADER_INSERTION:
4592 adapter->rx_vlan_header_insertion =
4593 be64_to_cpu(crq->query_capability.number);
4594 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4595 adapter->rx_vlan_header_insertion);
4597 case MAX_TX_SG_ENTRIES:
4598 adapter->max_tx_sg_entries =
4599 be64_to_cpu(crq->query_capability.number);
4600 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4601 adapter->max_tx_sg_entries);
4603 case RX_SG_SUPPORTED:
4604 adapter->rx_sg_supported =
4605 be64_to_cpu(crq->query_capability.number);
4606 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4607 adapter->rx_sg_supported);
4609 case OPT_TX_COMP_SUB_QUEUES:
4610 adapter->opt_tx_comp_sub_queues =
4611 be64_to_cpu(crq->query_capability.number);
4612 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4613 adapter->opt_tx_comp_sub_queues);
4615 case OPT_RX_COMP_QUEUES:
4616 adapter->opt_rx_comp_queues =
4617 be64_to_cpu(crq->query_capability.number);
4618 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4619 adapter->opt_rx_comp_queues);
4621 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4622 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4623 be64_to_cpu(crq->query_capability.number);
4624 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4625 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4627 case OPT_TX_ENTRIES_PER_SUBCRQ:
4628 adapter->opt_tx_entries_per_subcrq =
4629 be64_to_cpu(crq->query_capability.number);
4630 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4631 adapter->opt_tx_entries_per_subcrq);
4633 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4634 adapter->opt_rxba_entries_per_subcrq =
4635 be64_to_cpu(crq->query_capability.number);
4636 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4637 adapter->opt_rxba_entries_per_subcrq);
4639 case TX_RX_DESC_REQ:
4640 adapter->tx_rx_desc_req = crq->query_capability.number;
4641 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4642 adapter->tx_rx_desc_req);
4646 netdev_err(netdev, "Got invalid cap rsp %d\n",
4647 crq->query_capability.capability);
4651 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4652 adapter->wait_capability = false;
4653 send_request_cap(adapter, 0);
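/*
 * Note: handle_query_cap_rsp() is the first half of a two-phase
 * handshake.  Once the last QUERY_CAPABILITY_RSP arrives (i.e.
 * running_cap_crqs reaches zero), send_request_cap(adapter, 0) requests
 * concrete values bounded by the min/max limits recorded above, and the
 * server's answers come back through handle_request_cap_rsp().
 */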
4657 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4659 union ibmvnic_crq crq;
4662 memset(&crq, 0, sizeof(crq));
4663 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4664 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4666 mutex_lock(&adapter->fw_lock);
4667 adapter->fw_done_rc = 0;
4668 reinit_completion(&adapter->fw_done);
4670 rc = ibmvnic_send_crq(adapter, &crq);
4672 mutex_unlock(&adapter->fw_lock);
4676 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
4678 mutex_unlock(&adapter->fw_lock);
4682 mutex_unlock(&adapter->fw_lock);
4683 return adapter->fw_done_rc ? -EIO : 0;
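/*
 * send_query_phys_parms() illustrates the synchronous-command pattern
 * this driver uses for several CRQ requests (a condensed sketch of the
 * code above, error paths elided):
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = ibmvnic_send_crq(adapter, &crq);
 *	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 *	mutex_unlock(&adapter->fw_lock);
 *
 * The matching response handler records its rc in fw_done_rc and calls
 * complete(&adapter->fw_done) to wake the waiter.
 */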
4686 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4687 struct ibmvnic_adapter *adapter)
4689 struct net_device *netdev = adapter->netdev;
4691 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
4693 rc = crq->query_phys_parms_rsp.rc.code;
4695 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4699 case IBMVNIC_10MBPS:
4700 adapter->speed = SPEED_10;
4702 case IBMVNIC_100MBPS:
4703 adapter->speed = SPEED_100;
4706 adapter->speed = SPEED_1000;
4708 case IBMVNIC_10GBPS:
4709 adapter->speed = SPEED_10000;
4711 case IBMVNIC_25GBPS:
4712 adapter->speed = SPEED_25000;
4714 case IBMVNIC_40GBPS:
4715 adapter->speed = SPEED_40000;
4717 case IBMVNIC_50GBPS:
4718 adapter->speed = SPEED_50000;
4720 case IBMVNIC_100GBPS:
4721 adapter->speed = SPEED_100000;
4723 case IBMVNIC_200GBPS:
4724 adapter->speed = SPEED_200000;
4727 if (netif_carrier_ok(netdev))
4728 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
4729 adapter->speed = SPEED_UNKNOWN;
4731 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4732 adapter->duplex = DUPLEX_FULL;
4733 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4734 adapter->duplex = DUPLEX_HALF;
4736 adapter->duplex = DUPLEX_UNKNOWN;
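/*
 * The speed and duplex cached above use the standard SPEED_* and
 * DUPLEX_* ethtool constants and presumably back the driver's link
 * reporting to userspace (the ethtool handlers are outside this
 * excerpt).  Anything the switch does not recognize is reported as
 * unknown rather than guessed.
 */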
4741 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4742 struct ibmvnic_adapter *adapter)
4744 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4745 struct net_device *netdev = adapter->netdev;
4746 struct device *dev = &adapter->vdev->dev;
4747 u64 *u64_crq = (u64 *)crq;
4750 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4751 (unsigned long)cpu_to_be64(u64_crq[0]),
4752 (unsigned long)cpu_to_be64(u64_crq[1]));
4753 switch (gen_crq->first) {
4754 case IBMVNIC_CRQ_INIT_RSP:
4755 switch (gen_crq->cmd) {
4756 case IBMVNIC_CRQ_INIT:
4757 dev_info(dev, "Partner initialized\n");
4758 adapter->from_passive_init = true;
4759 if (!completion_done(&adapter->init_done)) {
4760 complete(&adapter->init_done);
4761 adapter->init_done_rc = -EIO;
4763 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4765 case IBMVNIC_CRQ_INIT_COMPLETE:
4766 dev_info(dev, "Partner initialization complete\n");
4767 adapter->crq.active = true;
4768 send_version_xchg(adapter);
4771 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4774 case IBMVNIC_CRQ_XPORT_EVENT:
4775 netif_carrier_off(netdev);
4776 adapter->crq.active = false;
4777 /* terminate any thread waiting for a response from the device */
4780 if (!completion_done(&adapter->fw_done)) {
4781 adapter->fw_done_rc = -EIO;
4782 complete(&adapter->fw_done);
4784 if (!completion_done(&adapter->stats_done))
4785 complete(&adapter->stats_done);
4786 if (test_bit(0, &adapter->resetting))
4787 adapter->force_reset_recovery = true;
4788 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4789 dev_info(dev, "Migrated, re-enabling adapter\n");
4790 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4791 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4792 dev_info(dev, "Backing device failover detected\n");
4793 adapter->failover_pending = true;
4795 /* The adapter lost the connection */
4796 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4798 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4801 case IBMVNIC_CRQ_CMD_RSP:
4804 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4809 switch (gen_crq->cmd) {
4810 case VERSION_EXCHANGE_RSP:
4811 rc = crq->version_exchange_rsp.rc.code;
4813 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4817 be16_to_cpu(crq->version_exchange_rsp.version);
4818 dev_info(dev, "Partner protocol version is %d\n",
4820 send_query_cap(adapter);
4822 case QUERY_CAPABILITY_RSP:
4823 handle_query_cap_rsp(crq, adapter);
4826 handle_query_map_rsp(crq, adapter);
4828 case REQUEST_MAP_RSP:
4829 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4830 complete(&adapter->fw_done);
4832 case REQUEST_UNMAP_RSP:
4833 handle_request_unmap_rsp(crq, adapter);
4835 case REQUEST_CAPABILITY_RSP:
4836 handle_request_cap_rsp(crq, adapter);
4839 netdev_dbg(netdev, "Got Login Response\n");
4840 handle_login_rsp(crq, adapter);
4842 case LOGICAL_LINK_STATE_RSP:
4844 "Got Logical Link State Response, state: %d rc: %d\n",
4845 crq->logical_link_state_rsp.link_state,
4846 crq->logical_link_state_rsp.rc.code);
4847 adapter->logical_link_state =
4848 crq->logical_link_state_rsp.link_state;
4849 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4850 complete(&adapter->init_done);
4852 case LINK_STATE_INDICATION:
4853 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4854 adapter->phys_link_state =
4855 crq->link_state_indication.phys_link_state;
4856 adapter->logical_link_state =
4857 crq->link_state_indication.logical_link_state;
4858 if (adapter->phys_link_state && adapter->logical_link_state)
4859 netif_carrier_on(netdev);
4861 netif_carrier_off(netdev);
4863 case CHANGE_MAC_ADDR_RSP:
4864 netdev_dbg(netdev, "Got MAC address change Response\n");
4865 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4867 case ERROR_INDICATION:
4868 netdev_dbg(netdev, "Got Error Indication\n");
4869 handle_error_indication(crq, adapter);
4871 case REQUEST_STATISTICS_RSP:
4872 netdev_dbg(netdev, "Got Statistics Response\n");
4873 complete(&adapter->stats_done);
4875 case QUERY_IP_OFFLOAD_RSP:
4876 netdev_dbg(netdev, "Got Query IP offload Response\n");
4877 handle_query_ip_offload_rsp(adapter);
4879 case MULTICAST_CTRL_RSP:
4880 netdev_dbg(netdev, "Got multicast control Response\n");
4882 case CONTROL_IP_OFFLOAD_RSP:
4883 netdev_dbg(netdev, "Got Control IP offload Response\n");
4884 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4885 sizeof(adapter->ip_offload_ctrl),
4887 complete(&adapter->init_done);
4889 case COLLECT_FW_TRACE_RSP:
4890 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4891 complete(&adapter->fw_done);
4893 case GET_VPD_SIZE_RSP:
4894 handle_vpd_size_rsp(crq, adapter);
4897 handle_vpd_rsp(crq, adapter);
4899 case QUERY_PHYS_PARMS_RSP:
4900 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4901 complete(&adapter->fw_done);
4904 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4909 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4911 struct ibmvnic_adapter *adapter = instance;
4913 tasklet_schedule(&adapter->tasklet);
4917 static void ibmvnic_tasklet(struct tasklet_struct *t)
4919 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
4920 struct ibmvnic_crq_queue *queue = &adapter->crq;
4921 union ibmvnic_crq *crq;
4922 unsigned long flags;
4925 spin_lock_irqsave(&queue->lock, flags);
4927 /* Pull all the valid messages off the CRQ */
4928 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4929 ibmvnic_handle_crq(crq, adapter);
4930 crq->generic.first = 0;
4933 /* remain in tasklet until all capability responses are received */
4936 if (!adapter->wait_capability)
4939 /* if capability CRQs were sent in this tasklet, the next
4940 * tasklet run must wait until all responses are received */
4942 if (atomic_read(&adapter->running_cap_crqs) != 0)
4943 adapter->wait_capability = true;
4944 spin_unlock_irqrestore(&queue->lock, flags);
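/*
 * Interrupt handling pattern (descriptive): the hard IRQ handler above
 * only schedules the tasklet; the tasklet then drains every valid entry
 * from the CRQ under queue->lock and dispatches each to
 * ibmvnic_handle_crq().  Clearing crq->generic.first hands the slot back
 * for reuse.  The wait_capability logic keeps the tasklet looping until
 * all outstanding capability responses have arrived, since those
 * responses drive the next stage of initialization.
 */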
4947 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4949 struct vio_dev *vdev = adapter->vdev;
4953 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4954 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4957 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4962 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4964 struct ibmvnic_crq_queue *crq = &adapter->crq;
4965 struct device *dev = &adapter->vdev->dev;
4966 struct vio_dev *vdev = adapter->vdev;
4971 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4972 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4974 /* Clean out the queue */
4978 memset(crq->msgs, 0, PAGE_SIZE);
4980 crq->active = false;
4982 /* And re-open it again */
4983 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4984 crq->msg_token, PAGE_SIZE);
4987 /* Adapter is good, but other end is not ready */
4988 dev_warn(dev, "Partner adapter not ready\n");
4990 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
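/*
 * Busy-retry idiom (descriptive): H_FREE_CRQ, H_REG_CRQ and related
 * hcalls may return H_BUSY or an H_IS_LONG_BUSY() code while the
 * hypervisor is still working, so callers in this file loop until they
 * get a definitive return code:
 *
 *	do {
 *		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 *	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 */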
4995 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4997 struct ibmvnic_crq_queue *crq = &adapter->crq;
4998 struct vio_dev *vdev = adapter->vdev;
5004 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5005 free_irq(vdev->irq, adapter);
5006 tasklet_kill(&adapter->tasklet);
5008 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5009 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5011 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5013 free_page((unsigned long)crq->msgs);
5015 crq->active = false;
5018 static int init_crq_queue(struct ibmvnic_adapter *adapter)
5020 struct ibmvnic_crq_queue *crq = &adapter->crq;
5021 struct device *dev = &adapter->vdev->dev;
5022 struct vio_dev *vdev = adapter->vdev;
5023 int rc, retrc = -ENOMEM;
5028 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5029 /* Should we allocate more than one page? */
5034 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5035 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5037 if (dma_mapping_error(dev, crq->msg_token))
5040 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5041 crq->msg_token, PAGE_SIZE);
5043 if (rc == H_RESOURCE)
5044 /* maybe kexecing and resource is busy. try a reset */
5045 rc = ibmvnic_reset_crq(adapter);
5048 if (rc == H_CLOSED) {
5049 dev_warn(dev, "Partner adapter not ready\n");
5051 dev_warn(dev, "Error %d opening adapter\n", rc);
5052 goto reg_crq_failed;
5057 tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
5059 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
5060 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5061 adapter->vdev->unit_address);
5062 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
5064 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5066 goto req_irq_failed;
5069 rc = vio_enable_interrupts(vdev);
5071 dev_err(dev, "Error %d enabling interrupts\n", rc);
5072 goto req_irq_failed;
5076 spin_lock_init(&crq->lock);
5081 tasklet_kill(&adapter->tasklet);
5083 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5084 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5086 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5088 free_page((unsigned long)crq->msgs);
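/*
 * Setup order in init_crq_queue() (descriptive): allocate a zeroed page
 * for the queue, DMA-map it, register it with H_REG_CRQ (falling back to
 * a CRQ reset if H_RESOURCE suggests a stale registration from kexec),
 * set up the tasklet, request the IRQ, and finally enable VIO
 * interrupts.  The error labels above unwind in reverse order, killing
 * the tasklet and freeing the CRQ before the DMA mapping and page are
 * released.
 */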
5093 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
5095 struct device *dev = &adapter->vdev->dev;
5096 unsigned long timeout = msecs_to_jiffies(30000);
5097 u64 old_num_rx_queues, old_num_tx_queues;
5100 adapter->from_passive_init = false;
5103 old_num_rx_queues = adapter->req_rx_queues;
5104 old_num_tx_queues = adapter->req_tx_queues;
5105 reinit_completion(&adapter->init_done);
5108 adapter->init_done_rc = 0;
5109 rc = ibmvnic_send_crq_init(adapter);
5111 dev_err(dev, "Send crq init failed with error %d\n", rc);
5115 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5116 dev_err(dev, "Initialization sequence timed out\n");
5120 if (adapter->init_done_rc) {
5121 release_crq_queue(adapter);
5122 return adapter->init_done_rc;
5125 if (adapter->from_passive_init) {
5126 adapter->state = VNIC_OPEN;
5127 adapter->from_passive_init = false;
5132 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
5133 adapter->reset_reason != VNIC_RESET_MOBILITY) {
5134 if (adapter->req_rx_queues != old_num_rx_queues ||
5135 adapter->req_tx_queues != old_num_tx_queues) {
5136 release_sub_crqs(adapter, 0);
5137 rc = init_sub_crqs(adapter);
5139 rc = reset_sub_crq_queues(adapter);
5142 rc = init_sub_crqs(adapter);
5146 dev_err(dev, "Initialization of sub crqs failed\n");
5147 release_crq_queue(adapter);
5151 rc = init_sub_crq_irqs(adapter);
5153 dev_err(dev, "Failed to initialize sub crq irqs\n");
5154 release_crq_queue(adapter);
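/*
 * Reset-time sub-CRQ strategy (descriptive): during a reset that is not
 * a mobility event, changed queue counts force a full
 * release_sub_crqs()/init_sub_crqs() cycle, while unchanged counts allow
 * the cheaper reset_sub_crq_queues() path that reuses the existing DMA
 * mappings.  A fresh (non-reset) initialization always builds the
 * sub-CRQs and their IRQs from scratch.
 */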
5160 static struct device_attribute dev_attr_failover;
5162 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5164 struct ibmvnic_adapter *adapter;
5165 struct net_device *netdev;
5166 unsigned char *mac_addr_p;
5169 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5172 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5173 VETH_MAC_ADDR, NULL);
5176 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5177 __FILE__, __LINE__);
5181 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
5182 IBMVNIC_MAX_QUEUES);
5186 adapter = netdev_priv(netdev);
5187 adapter->state = VNIC_PROBING;
5188 dev_set_drvdata(&dev->dev, netdev);
5189 adapter->vdev = dev;
5190 adapter->netdev = netdev;
5192 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5193 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5194 netdev->irq = dev->irq;
5195 netdev->netdev_ops = &ibmvnic_netdev_ops;
5196 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5197 SET_NETDEV_DEV(netdev, &dev->dev);
5199 spin_lock_init(&adapter->stats_lock);
5201 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
5202 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5203 __ibmvnic_delayed_reset);
5204 INIT_LIST_HEAD(&adapter->rwi_list);
5205 spin_lock_init(&adapter->rwi_lock);
5206 spin_lock_init(&adapter->state_lock);
5207 mutex_init(&adapter->fw_lock);
5208 init_completion(&adapter->init_done);
5209 init_completion(&adapter->fw_done);
5210 init_completion(&adapter->reset_done);
5211 init_completion(&adapter->stats_done);
5212 clear_bit(0, &adapter->resetting);
5215 rc = init_crq_queue(adapter);
5217 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5219 goto ibmvnic_init_fail;
5222 rc = ibmvnic_reset_init(adapter, false);
5223 if (rc && rc != EAGAIN)
5224 goto ibmvnic_init_fail;
5225 } while (rc == EAGAIN);
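/* Note: rc here comes from adapter->init_done_rc (see
 * ibmvnic_reset_init() above), which carries the partner's response
 * code as a positive value; hence the loop above compares against a
 * bare EAGAIN rather than -EAGAIN.
 */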
5227 rc = init_stats_buffers(adapter);
5229 goto ibmvnic_init_fail;
5231 rc = init_stats_token(adapter);
5233 goto ibmvnic_stats_fail;
5235 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5236 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5237 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5239 rc = device_create_file(&dev->dev, &dev_attr_failover);
5241 goto ibmvnic_dev_file_err;
5243 netif_carrier_off(netdev);
5244 rc = register_netdev(netdev);
5246 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5247 goto ibmvnic_register_fail;
5249 dev_info(&dev->dev, "ibmvnic registered\n");
5251 adapter->state = VNIC_PROBED;
5253 adapter->wait_for_reset = false;
5257 ibmvnic_register_fail:
5258 device_remove_file(&dev->dev, &dev_attr_failover);
5260 ibmvnic_dev_file_err:
5261 release_stats_token(adapter);
5264 release_stats_buffers(adapter);
5267 release_sub_crqs(adapter, 1);
5268 release_crq_queue(adapter);
5269 mutex_destroy(&adapter->fw_lock);
5270 free_netdev(netdev);
5275 static int ibmvnic_remove(struct vio_dev *dev)
5277 struct net_device *netdev = dev_get_drvdata(&dev->dev);
5278 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5279 unsigned long flags;
5281 spin_lock_irqsave(&adapter->state_lock, flags);
5282 if (test_bit(0, &adapter->resetting)) {
5283 spin_unlock_irqrestore(&adapter->state_lock, flags);
5287 adapter->state = VNIC_REMOVING;
5288 spin_unlock_irqrestore(&adapter->state_lock, flags);
5290 flush_work(&adapter->ibmvnic_reset);
5291 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5294 unregister_netdevice(netdev);
5296 release_resources(adapter);
5297 release_sub_crqs(adapter, 1);
5298 release_crq_queue(adapter);
5300 release_stats_token(adapter);
5301 release_stats_buffers(adapter);
5303 adapter->state = VNIC_REMOVED;
5306 mutex_destroy(&adapter->fw_lock);
5307 device_remove_file(&dev->dev, &dev_attr_failover);
5308 free_netdev(netdev);
5309 dev_set_drvdata(&dev->dev, NULL);
5314 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5315 const char *buf, size_t count)
5317 struct net_device *netdev = dev_get_drvdata(dev);
5318 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5319 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5320 __be64 session_token;
5323 if (!sysfs_streq(buf, "1"))
5326 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5327 H_GET_SESSION_TOKEN, 0, 0, 0);
5329 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5334 session_token = (__be64)retbuf[0];
5335 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5336 be64_to_cpu(session_token));
5337 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5338 H_SESSION_ERR_DETECTED, session_token, 0, 0);
5340 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5348 static DEVICE_ATTR_WO(failover);
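/*
 * Usage sketch (the sysfs path is illustrative; the unit address
 * varies): writing "1" to the failover attribute asks the platform to
 * fail the session over to the backing device:
 *
 *	# echo 1 > /sys/devices/vio/30000002/failover
 *
 * Internally this is H_VIOCTL(H_GET_SESSION_TOKEN) followed by
 * H_VIOCTL(H_SESSION_ERR_DETECTED) with the returned token, as
 * implemented by failover_store() above.
 */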
5350 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5352 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5353 struct ibmvnic_adapter *adapter;
5354 struct iommu_table *tbl;
5355 unsigned long ret = 0;
5358 tbl = get_iommu_table_base(&vdev->dev);
5360 /* netdev is initialized at probe time along with the structures we need below */
5362 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5364 adapter = netdev_priv(netdev);
5366 ret += PAGE_SIZE; /* the crq message queue */
5367 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5369 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5370 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5372 for (i = 0; i < adapter->num_active_rx_pools; i++)
5373 ret += adapter->rx_pool[i].size *
5374 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
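/*
 * Worked example (illustrative, assuming 4 TX and 4 RX queues): the
 * estimate above charges PAGE_SIZE for the CRQ page, one IOMMU-aligned
 * ibmvnic_statistics buffer, 4 * PAGE_SIZE for each of the 8 sub-CRQ
 * message queues, and size * IOMMU_PAGE_ALIGN(buff_size, tbl) for each
 * active RX pool:
 *
 *	ret = PAGE_SIZE + stats_sz + (4 + 4) * 4 * PAGE_SIZE + rx_pool_sz;
 */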
5379 static int ibmvnic_resume(struct device *dev)
5381 struct net_device *netdev = dev_get_drvdata(dev);
5382 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5384 if (adapter->state != VNIC_OPEN)
5387 tasklet_schedule(&adapter->tasklet);
5392 static const struct vio_device_id ibmvnic_device_table[] = {
5393 {"network", "IBM,vnic"},
5396 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5398 static const struct dev_pm_ops ibmvnic_pm_ops = {
5399 .resume = ibmvnic_resume
5402 static struct vio_driver ibmvnic_driver = {
5403 .id_table = ibmvnic_device_table,
5404 .probe = ibmvnic_probe,
5405 .remove = ibmvnic_remove,
5406 .get_desired_dma = ibmvnic_get_desired_dma,
5407 .name = ibmvnic_driver_name,
5408 .pm = &ibmvnic_pm_ops,
5411 /* module functions */
5412 static int __init ibmvnic_module_init(void)
5414 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5415 IBMVNIC_DRIVER_VERSION);
5417 return vio_register_driver(&ibmvnic_driver);
5420 static void __exit ibmvnic_module_exit(void)
5422 vio_unregister_driver(&ibmvnic_driver);
5425 module_init(ibmvnic_module_init);
5426 module_exit(ibmvnic_module_exit);