// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
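
/* Illustrative sketch of the long term mapping lifecycle described above
 * (pseudocode, not part of the driver; see alloc_long_term_buff() and
 * free_long_term_buff() below for the real flow):
 *
 *	ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr, GFP_KERNEL);
 *	send_request_map(adapter, ltb->addr, size, ltb->map_id);
 *	// skbs are then copied in and out of ltb->buff at fixed offsets;
 *	// only offsets within the mapped region are exchanged over sCRQs
 *	send_request_unmap(adapter, ltb->map_id);
 *	dma_free_coherent(dev, size, ltb->buff, ltb->addr);
 */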
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;

static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
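
/* Illustrative use (a sketch, not code from this file): an ethtool
 * get_ethtool_stats handler could walk ibmvnic_stats[] below and pull
 * each counter out of the firmware-updated statistics block with
 * IBMVNIC_GET_STAT(), e.g.:
 *
 *	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 *		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 */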
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
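
/* Allocate a DMA-coherent buffer and register it with the VNIC server
 * via a REQUEST_MAP firmware command, waiting for the map response.
 */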
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
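
/* Release a long term buffer, first asking the VNIC server to unmap it
 * unless firmware drops the mapping itself (failover and mobility resets).
 */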
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}
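
/* Refill the empty slots of an rx pool: allocate an skb for each slot,
 * point the slot at a fixed offset in the pool's long term DMA buffer,
 * and post an rx_add sub-CRQ descriptor so firmware can place a
 * received frame there.
 */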
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}
static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}
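
/* Allocate one rx pool per rx sub-CRQ, sized from the buffer sizes
 * returned in the login response, including the long term DMA buffer
 * that backs every slot in the pool.
 */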
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}
static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}
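
/* Log in to the VNIC server, retrying on timeout, abort or partial
 * success; a partial success response requires renegotiating
 * capabilities and reallocating the sub-CRQs before trying again.
 */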
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	return rc;
}
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}
static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}
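
/* Main transmit path: copy the (possibly fragmented) skb into the next
 * free slot of the queue's long term buffer, build a v1 TX sub-CRQ
 * descriptor (plus header descriptors when firmware requests them),
 * and post it with send_subcrq()/send_subcrq_indirect().
 */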
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}
/*
 * do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_rwi *rwi,
				 u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);

	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter);
	if (rc)
		return IBMVNIC_INIT_FAILED;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = reset_state;
		return rc;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc)
		return IBMVNIC_OPEN_FAILED;

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	return 0;
}
/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	rtnl_lock();

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		adapter->state = VNIC_CLOSING;

		/* Release the RTNL lock before link state change and
		 * re-acquire after the link state change to allow
		 * linkwatch_event to grab the RTNL lock and run during
		 * a reset.
		 */
		rtnl_unlock();
		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
		rtnl_lock();
		if (rc)
			goto out;

		if (adapter->state != VNIC_CLOSING) {
			rc = -1;
			goto out;
		}

		adapter->state = VNIC_CLOSED;
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = reset_state;
			goto out;
		}

		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				goto out;

			rc = reset_rx_pools(adapter);
			if (rc)
				goto out;
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

	rc = 0;

out:
	rtnl_unlock();

	return rc;
}
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_init(adapter);
	if (rc)
		return rc;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc)
		return IBMVNIC_OPEN_FAILED;

	return 0;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}
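
/* Reset worker: drain the rwi (reset work item) list, dispatching each
 * item to do_change_param_reset(), do_hard_reset() or do_reset() as
 * appropriate for the reset reason and adapter state.
 */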
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool saved_state = false;
	unsigned long flags;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	if (test_and_set_bit_lock(0, &adapter->resetting)) {
		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
				      IBMVNIC_RESET_DELAY);
		return;
	}

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (!saved_state) {
			reset_state = adapter->state;
			adapter->state = VNIC_RESETTING;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			/* CHANGE_PARAM requestor holds rtnl_lock */
			rc = do_change_param_reset(adapter, rwi, reset_state);
		} else if (adapter->force_reset_recovery) {
			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
				adapter->from_passive_init)) {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc == IBMVNIC_OPEN_FAILED) {
			if (list_empty(&adapter->rwi_list))
				adapter->state = VNIC_CLOSED;
			else
				adapter->state = reset_state;
			rc = 0;
		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
		    !adapter->force_reset_recovery)
			break;

		rwi = get_next_rwi(adapter);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
	}

	clear_bit_unlock(0, &adapter->resetting);
}
static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;
	int ret;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	return -ret;
}
static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
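
/* NAPI poll: pull rx completions off this queue's sub-CRQ, copy each
 * frame out of the long term buffer into its skb, recycle the buffer
 * slot, and hand the skb to the stack with napi_gro_receive().
 */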
2334 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2336 struct net_device *netdev = napi->dev;
2337 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2338 int scrq_num = (int)(napi - adapter->napi);
2339 int frames_processed = 0;
2342 while (frames_processed < budget) {
2343 struct sk_buff *skb;
2344 struct ibmvnic_rx_buff *rx_buff;
2345 union sub_crq *next;
2350 if (unlikely(test_bit(0, &adapter->resetting) &&
2351 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2352 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2353 napi_complete_done(napi, frames_processed);
2354 return frames_processed;
if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
break;
next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
rx_buff = (struct ibmvnic_rx_buff *)
be64_to_cpu(next->rx_comp.correlator);
2363 /* do error checking */
2364 if (next->rx_comp.rc) {
2365 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2366 be16_to_cpu(next->rx_comp.rc));
2367 /* free the entry */
2368 next->rx_comp.first = 0;
2369 dev_kfree_skb_any(rx_buff->skb);
remove_buff_from_pool(adapter, rx_buff);
continue;
2372 } else if (!rx_buff->skb) {
2373 /* free the entry */
2374 next->rx_comp.first = 0;
remove_buff_from_pool(adapter, rx_buff);
continue;
}
2379 length = be32_to_cpu(next->rx_comp.len);
2380 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2381 flags = next->rx_comp.flags;
skb = rx_buff->skb;
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
/* VLAN header has been stripped by the system firmware and
* needs to be inserted by the driver
*/
2389 if (adapter->rx_vlan_header_insertion &&
2390 (flags & IBMVNIC_VLAN_STRIPPED))
2391 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2392 ntohs(next->rx_comp.vlan_tci));
2394 /* free the entry */
2395 next->rx_comp.first = 0;
2396 remove_buff_from_pool(adapter, rx_buff);
2398 skb_put(skb, length);
2399 skb->protocol = eth_type_trans(skb, netdev);
2400 skb_record_rx_queue(skb, scrq_num);
2402 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2403 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2404 skb->ip_summed = CHECKSUM_UNNECESSARY;
2408 napi_gro_receive(napi, skb); /* send it up */
2409 netdev->stats.rx_packets++;
2410 netdev->stats.rx_bytes += length;
2411 adapter->rx_stats_buffers[scrq_num].packets++;
adapter->rx_stats_buffers[scrq_num].bytes += length;
frames_processed++;
}
2416 if (adapter->state != VNIC_CLOSING)
2417 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2419 if (frames_processed < budget) {
2420 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2421 napi_complete_done(napi, frames_processed);
2422 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2423 napi_reschedule(napi)) {
disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
goto restart_poll;
2428 return frames_processed;
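/* ibmvnic_poll() follows the usual NAPI contract: consume at most
 * "budget" frames, and only when fewer were available call
 * napi_complete_done() and re-arm the queue interrupt. The
 * pending_scrq()/napi_reschedule() recheck closes the race with a
 * completion that arrived between the last poll and the re-arm.
 */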
2431 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2435 adapter->fallback.mtu = adapter->req_mtu;
2436 adapter->fallback.rx_queues = adapter->req_rx_queues;
2437 adapter->fallback.tx_queues = adapter->req_tx_queues;
2438 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2439 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2441 reinit_completion(&adapter->reset_done);
2442 adapter->wait_for_reset = true;
2443 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2449 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2456 if (adapter->reset_done_rc) {
2458 adapter->desired.mtu = adapter->fallback.mtu;
2459 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2460 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2461 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2462 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2464 reinit_completion(&adapter->reset_done);
2465 adapter->wait_for_reset = true;
2466 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
60000);
2479 adapter->wait_for_reset = false;
2484 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2486 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2488 adapter->desired.mtu = new_mtu + ETH_HLEN;
2490 return wait_for_reset(adapter);
2493 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2494 struct net_device *dev,
2495 netdev_features_t features)
/* Some backing hardware adapters cannot
* handle packets with an MSS less than 224
* or with only one segment.
*/
2501 if (skb_is_gso(skb)) {
2502 if (skb_shinfo(skb)->gso_size < 224 ||
2503 skb_shinfo(skb)->gso_segs == 1)
features &= ~NETIF_F_GSO_MASK;
}

return features;
}
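/* Illustrative example (not from the original source): a TSO skb with
 * gso_size = 200 would fail the first test above, so NETIF_F_GSO_MASK
 * is cleared for it and the core falls back to software segmentation
 * before calling ndo_start_xmit.
 */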
2510 static const struct net_device_ops ibmvnic_netdev_ops = {
2511 .ndo_open = ibmvnic_open,
2512 .ndo_stop = ibmvnic_close,
2513 .ndo_start_xmit = ibmvnic_xmit,
2514 .ndo_set_rx_mode = ibmvnic_set_multi,
2515 .ndo_set_mac_address = ibmvnic_set_mac,
2516 .ndo_validate_addr = eth_validate_addr,
2517 .ndo_tx_timeout = ibmvnic_tx_timeout,
2518 .ndo_change_mtu = ibmvnic_change_mtu,
2519 .ndo_features_check = ibmvnic_features_check,
2522 /* ethtool functions */
2524 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2525 struct ethtool_link_ksettings *cmd)
2527 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2530 rc = send_query_phys_parms(adapter);
if (rc) {
adapter->speed = SPEED_UNKNOWN;
adapter->duplex = DUPLEX_UNKNOWN;
}
2535 cmd->base.speed = adapter->speed;
2536 cmd->base.duplex = adapter->duplex;
2537 cmd->base.port = PORT_FIBRE;
2538 cmd->base.phy_address = 0;
2539 cmd->base.autoneg = AUTONEG_ENABLE;
2544 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2545 struct ethtool_drvinfo *info)
2547 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2549 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2550 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2551 strlcpy(info->fw_version, adapter->fw_version,
2552 sizeof(info->fw_version));
2555 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2557 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2559 return adapter->msg_enable;
2562 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2564 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2566 adapter->msg_enable = data;
2569 static u32 ibmvnic_get_link(struct net_device *netdev)
2571 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2573 /* Don't need to send a query because we request a logical link up at
* init and then we wait for link state indications
*/
2576 return adapter->logical_link_state;
2579 static void ibmvnic_get_ringparam(struct net_device *netdev,
2580 struct ethtool_ringparam *ring)
2582 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2584 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2585 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2586 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
} else {
ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
}
2591 ring->rx_mini_max_pending = 0;
2592 ring->rx_jumbo_max_pending = 0;
2593 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2594 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2595 ring->rx_mini_pending = 0;
2596 ring->rx_jumbo_pending = 0;
2599 static int ibmvnic_set_ringparam(struct net_device *netdev,
2600 struct ethtool_ringparam *ring)
2602 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2606 adapter->desired.rx_entries = ring->rx_pending;
2607 adapter->desired.tx_entries = ring->tx_pending;
2609 ret = wait_for_reset(adapter);
if (!ret &&
(adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
adapter->req_tx_entries_per_subcrq != ring->tx_pending))
netdev_info(netdev,
"Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2616 ring->rx_pending, ring->tx_pending,
2617 adapter->req_rx_add_entries_per_subcrq,
2618 adapter->req_tx_entries_per_subcrq);
2622 static void ibmvnic_get_channels(struct net_device *netdev,
2623 struct ethtool_channels *channels)
2625 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2627 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2628 channels->max_rx = adapter->max_rx_queues;
2629 channels->max_tx = adapter->max_tx_queues;
} else {
channels->max_rx = IBMVNIC_MAX_QUEUES;
channels->max_tx = IBMVNIC_MAX_QUEUES;
}
2635 channels->max_other = 0;
2636 channels->max_combined = 0;
2637 channels->rx_count = adapter->req_rx_queues;
2638 channels->tx_count = adapter->req_tx_queues;
2639 channels->other_count = 0;
2640 channels->combined_count = 0;
2643 static int ibmvnic_set_channels(struct net_device *netdev,
2644 struct ethtool_channels *channels)
2646 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2650 adapter->desired.rx_queues = channels->rx_count;
2651 adapter->desired.tx_queues = channels->tx_count;
2653 ret = wait_for_reset(adapter);
if (!ret &&
(adapter->req_rx_queues != channels->rx_count ||
adapter->req_tx_queues != channels->tx_count))
netdev_info(netdev,
"Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2660 channels->rx_count, channels->tx_count,
2661 adapter->req_rx_queues, adapter->req_tx_queues);
2666 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2668 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2671 switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2674 i++, data += ETH_GSTRING_LEN)
2675 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2677 for (i = 0; i < adapter->req_tx_queues; i++) {
2678 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2679 data += ETH_GSTRING_LEN;
2681 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2682 data += ETH_GSTRING_LEN;
2684 snprintf(data, ETH_GSTRING_LEN,
2685 "tx%d_dropped_packets", i);
2686 data += ETH_GSTRING_LEN;
2689 for (i = 0; i < adapter->req_rx_queues; i++) {
2690 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2691 data += ETH_GSTRING_LEN;
2693 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2694 data += ETH_GSTRING_LEN;
2696 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2697 data += ETH_GSTRING_LEN;
2701 case ETH_SS_PRIV_FLAGS:
2702 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2703 strcpy(data + i * ETH_GSTRING_LEN,
2704 ibmvnic_priv_flags[i]);
2711 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2713 struct ibmvnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ibmvnic_stats) +
2718 adapter->req_tx_queues * NUM_TX_STATS +
2719 adapter->req_rx_queues * NUM_RX_STATS;
2720 case ETH_SS_PRIV_FLAGS:
2721 return ARRAY_SIZE(ibmvnic_priv_flags);
2727 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2728 struct ethtool_stats *stats, u64 *data)
2730 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2731 union ibmvnic_crq crq;
2735 memset(&crq, 0, sizeof(crq));
2736 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2737 crq.request_statistics.cmd = REQUEST_STATISTICS;
2738 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2739 crq.request_statistics.len =
2740 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2742 /* Wait for data to be written */
2743 reinit_completion(&adapter->stats_done);
2744 rc = ibmvnic_send_crq(adapter, &crq);
2747 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2751 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2752 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2753 ibmvnic_stats[i].offset));
2755 for (j = 0; j < adapter->req_tx_queues; j++) {
data[i] = adapter->tx_stats_buffers[j].packets;
i++;
data[i] = adapter->tx_stats_buffers[j].bytes;
i++;
data[i] = adapter->tx_stats_buffers[j].dropped_packets;
i++;
2764 for (j = 0; j < adapter->req_rx_queues; j++) {
data[i] = adapter->rx_stats_buffers[j].packets;
i++;
data[i] = adapter->rx_stats_buffers[j].bytes;
i++;
data[i] = adapter->rx_stats_buffers[j].interrupts;
i++;
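/* Note: the order of data[] entries here must stay in lockstep with
 * the string table built by ibmvnic_get_strings(): global stats first,
 * then packets/bytes/dropped_packets per TX queue, then
 * packets/bytes/interrupts per RX queue.
 */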
2774 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2776 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2778 return adapter->priv_flags;
2781 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2783 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2784 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
if (which_maxes)
adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
else
adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2793 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2794 .get_drvinfo = ibmvnic_get_drvinfo,
2795 .get_msglevel = ibmvnic_get_msglevel,
2796 .set_msglevel = ibmvnic_set_msglevel,
2797 .get_link = ibmvnic_get_link,
2798 .get_ringparam = ibmvnic_get_ringparam,
2799 .set_ringparam = ibmvnic_set_ringparam,
2800 .get_channels = ibmvnic_get_channels,
2801 .set_channels = ibmvnic_set_channels,
2802 .get_strings = ibmvnic_get_strings,
2803 .get_sset_count = ibmvnic_get_sset_count,
2804 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2805 .get_link_ksettings = ibmvnic_get_link_ksettings,
2806 .get_priv_flags = ibmvnic_get_priv_flags,
2807 .set_priv_flags = ibmvnic_set_priv_flags,
2810 /* Routines for managing CRQs/sCRQs */
2812 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2813 struct ibmvnic_sub_crq_queue *scrq)
if (scrq->irq) {
free_irq(scrq->irq, scrq);
irq_dispose_mapping(scrq->irq);
scrq->irq = 0;
}
2823 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
atomic_set(&scrq->used, 0);
scrq->cur = 0;
2827 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2828 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2832 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2836 for (i = 0; i < adapter->req_tx_queues; i++) {
2837 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2838 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2843 for (i = 0; i < adapter->req_rx_queues; i++) {
2844 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2845 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2853 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq,
bool do_h_free)
{
2857 struct device *dev = &adapter->vdev->dev;
2860 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
if (do_h_free) {
/* Close the sub-crqs */
do {
rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
adapter->vdev->unit_address,
scrq->crq_num);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

if (rc) {
netdev_err(adapter->netdev,
"Failed to release sub-CRQ %16lx, rc = %ld\n",
scrq->crq_num, rc);
}
}

dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
*adapter)
{
2886 struct device *dev = &adapter->vdev->dev;
2887 struct ibmvnic_sub_crq_queue *scrq;
scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
if (!scrq)
return NULL;

scrq->msgs =
(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
if (!scrq->msgs) {
2897 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2898 goto zero_page_failed;
scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
2903 if (dma_mapping_error(dev, scrq->msg_token)) {
2904 dev_warn(dev, "Couldn't map crq queue messages page\n");
2908 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2909 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2911 if (rc == H_RESOURCE)
2912 rc = ibmvnic_reset_crq(adapter);
2914 if (rc == H_CLOSED) {
2915 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2917 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2921 scrq->adapter = adapter;
2922 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
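/* Illustrative sizing (assumes 4 KB pages): the order-2 allocation
 * above yields a 4 * 4096 = 16384 byte ring, so with 32-byte sub-CRQ
 * descriptors scrq->size works out to 512 entries.
 */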
2923 spin_lock_init(&scrq->lock);
2925 netdev_dbg(adapter->netdev,
2926 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2927 scrq->crq_num, scrq->hw_irq, scrq->irq);
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
map_failed:
free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
kfree(scrq);

return NULL;
}
2942 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2946 if (adapter->tx_scrq) {
2947 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
if (!adapter->tx_scrq[i])
continue;

netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", i);
2953 if (adapter->tx_scrq[i]->irq) {
2954 free_irq(adapter->tx_scrq[i]->irq,
2955 adapter->tx_scrq[i]);
2956 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2957 adapter->tx_scrq[i]->irq = 0;
release_sub_crq_queue(adapter, adapter->tx_scrq[i],
do_h_free);
2964 kfree(adapter->tx_scrq);
2965 adapter->tx_scrq = NULL;
2966 adapter->num_active_tx_scrqs = 0;
2969 if (adapter->rx_scrq) {
2970 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
continue;

netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", i);
2976 if (adapter->rx_scrq[i]->irq) {
2977 free_irq(adapter->rx_scrq[i]->irq,
2978 adapter->rx_scrq[i]);
2979 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2980 adapter->rx_scrq[i]->irq = 0;
release_sub_crq_queue(adapter, adapter->rx_scrq[i],
do_h_free);
2987 kfree(adapter->rx_scrq);
2988 adapter->rx_scrq = NULL;
2989 adapter->num_active_rx_scrqs = 0;
2993 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2994 struct ibmvnic_sub_crq_queue *scrq)
2996 struct device *dev = &adapter->vdev->dev;
2999 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3000 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3002 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3007 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3008 struct ibmvnic_sub_crq_queue *scrq)
3010 struct device *dev = &adapter->vdev->dev;
3013 if (scrq->hw_irq > 0x100000000ULL) {
3014 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3018 if (test_bit(0, &adapter->resetting) &&
3019 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3020 u64 val = (0xff000000) | scrq->hw_irq;
3022 rc = plpar_hcall_norets(H_EOI, val);
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode, which is expected and not an error.
*/
3026 if (rc && (rc != H_FUNCTION))
3027 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3031 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3032 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3034 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3039 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3040 struct ibmvnic_sub_crq_queue *scrq)
3042 struct device *dev = &adapter->vdev->dev;
3043 struct ibmvnic_tx_pool *tx_pool;
3044 struct ibmvnic_tx_buff *txbuff;
3045 union sub_crq *next;
restart_loop:
while (pending_scrq(adapter, scrq)) {
3051 unsigned int pool = scrq->pool_index;
3052 int num_entries = 0;
3054 next = ibmvnic_next_scrq(adapter, scrq);
3055 for (i = 0; i < next->tx_comp.num_comps; i++) {
3056 if (next->tx_comp.rcs[i]) {
3057 dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
continue;
}
3061 index = be32_to_cpu(next->tx_comp.correlators[i]);
3062 if (index & IBMVNIC_TSO_POOL_MASK) {
3063 tx_pool = &adapter->tso_pool[pool];
3064 index &= ~IBMVNIC_TSO_POOL_MASK;
} else {
tx_pool = &adapter->tx_pool[pool];
}
3069 txbuff = &tx_pool->tx_buff[index];
3071 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
if (!txbuff->data_dma[j])
continue;
3075 txbuff->data_dma[j] = 0;
3078 if (txbuff->last_frag) {
dev_kfree_skb_any(txbuff->skb);
txbuff->skb = NULL;
}
3083 num_entries += txbuff->num_entries;
3085 tx_pool->free_map[tx_pool->producer_index] = index;
3086 tx_pool->producer_index =
3087 (tx_pool->producer_index + 1) %
3088 tx_pool->num_buffers;
/* remove tx_comp scrq */
3091 next->tx_comp.first = 0;
3093 if (atomic_sub_return(num_entries, &scrq->used) <=
3094 (adapter->req_tx_entries_per_subcrq / 2) &&
3095 __netif_subqueue_stopped(adapter->netdev,
3096 scrq->pool_index)) {
3097 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3098 netdev_dbg(adapter->netdev, "Started queue %d\n",
3103 enable_scrq_irq(adapter, scrq);
3105 if (pending_scrq(adapter, scrq)) {
disable_scrq_irq(adapter, scrq);
goto restart_loop;
}

return 0;
}
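/* The enable/recheck/disable sequence above mirrors ibmvnic_poll():
 * the interrupt is re-armed first and the queue checked once more, so
 * a completion that slipped in just before the re-arm is handled now
 * instead of waiting for the next interrupt.
 */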
3113 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3115 struct ibmvnic_sub_crq_queue *scrq = instance;
3116 struct ibmvnic_adapter *adapter = scrq->adapter;
3118 disable_scrq_irq(adapter, scrq);
3119 ibmvnic_complete_tx(adapter, scrq);
3124 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3126 struct ibmvnic_sub_crq_queue *scrq = instance;
3127 struct ibmvnic_adapter *adapter = scrq->adapter;
3129 /* When booting a kdump kernel we can hit pending interrupts
* prior to completing driver initialization.
*/
if (unlikely(adapter->state != VNIC_OPEN))
return IRQ_NONE;
3135 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3137 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3138 disable_scrq_irq(adapter, scrq);
3139 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3145 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3147 struct device *dev = &adapter->vdev->dev;
3148 struct ibmvnic_sub_crq_queue *scrq;
3152 for (i = 0; i < adapter->req_tx_queues; i++) {
3153 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3155 scrq = adapter->tx_scrq[i];
3156 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3160 dev_err(dev, "Error mapping irq\n");
3161 goto req_tx_irq_failed;
3164 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3165 adapter->vdev->unit_address, i);
3166 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3167 0, scrq->name, scrq);
3170 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3172 irq_dispose_mapping(scrq->irq);
3173 goto req_tx_irq_failed;
3177 for (i = 0; i < adapter->req_rx_queues; i++) {
3178 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3180 scrq = adapter->rx_scrq[i];
3181 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3184 dev_err(dev, "Error mapping irq\n");
3185 goto req_rx_irq_failed;
3187 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3188 adapter->vdev->unit_address, i);
3189 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3190 0, scrq->name, scrq);
3192 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3194 irq_dispose_mapping(scrq->irq);
3195 goto req_rx_irq_failed;
req_rx_irq_failed:
for (j = 0; j < i; j++) {
3202 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3203 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3205 i = adapter->req_tx_queues;
req_tx_irq_failed:
for (j = 0; j < i; j++) {
3208 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3209 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3211 release_sub_crqs(adapter, 1);
3215 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3217 struct device *dev = &adapter->vdev->dev;
3218 struct ibmvnic_sub_crq_queue **allqueues;
3219 int registered_queues = 0;
3224 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
return -1;
3230 for (i = 0; i < total_queues; i++) {
3231 allqueues[i] = init_sub_crq_queue(adapter);
3232 if (!allqueues[i]) {
3233 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3236 registered_queues++;
3239 /* Make sure we were able to register the minimum number of queues */
3240 if (registered_queues <
3241 adapter->min_tx_queues + adapter->min_rx_queues) {
3242 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
/* Distribute the queues that failed to allocate */
for (i = 0; i < total_queues - registered_queues + more; i++) {
3248 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
if (i % 2 == 1) {
if (adapter->req_rx_queues > adapter->min_rx_queues)
adapter->req_rx_queues--;
else
more++;
} else {
if (adapter->req_tx_queues > adapter->min_tx_queues)
adapter->req_tx_queues--;
else
more++;
}
3265 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3266 sizeof(*adapter->tx_scrq), GFP_KERNEL);
if (!adapter->tx_scrq)
goto tx_failed;
3270 for (i = 0; i < adapter->req_tx_queues; i++) {
3271 adapter->tx_scrq[i] = allqueues[i];
3272 adapter->tx_scrq[i]->pool_index = i;
3273 adapter->num_active_tx_scrqs++;
3276 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3277 sizeof(*adapter->rx_scrq), GFP_KERNEL);
if (!adapter->rx_scrq)
goto rx_failed;
3281 for (i = 0; i < adapter->req_rx_queues; i++) {
3282 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3283 adapter->rx_scrq[i]->scrq_num = i;
3284 adapter->num_active_rx_scrqs++;
kfree(allqueues);
return 0;

rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
tx_failed:
for (i = 0; i < registered_queues; i++)
release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
return -1;
}
3300 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3302 struct device *dev = &adapter->vdev->dev;
3303 union ibmvnic_crq crq;
/* Sub-CRQ entries are 32 bytes long */
3308 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
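/* Illustrative check (assumes 4 KB pages): entries_page evaluates to
 * 4 * 4096 / 32 = 512, the most descriptors a single ring can hold,
 * so a server-reported minimum above that could never be satisfied.
 */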
3310 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3311 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3312 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3316 if (adapter->desired.mtu)
3317 adapter->req_mtu = adapter->desired.mtu;
else
adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3321 if (!adapter->desired.tx_entries)
3322 adapter->desired.tx_entries =
3323 adapter->max_tx_entries_per_subcrq;
3324 if (!adapter->desired.rx_entries)
3325 adapter->desired.rx_entries =
3326 adapter->max_rx_add_entries_per_subcrq;
3328 max_entries = IBMVNIC_MAX_LTB_SIZE /
3329 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3331 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3332 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3333 adapter->desired.tx_entries = max_entries;
3336 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3337 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3338 adapter->desired.rx_entries = max_entries;
3341 if (adapter->desired.tx_entries)
3342 adapter->req_tx_entries_per_subcrq =
3343 adapter->desired.tx_entries;
else
adapter->req_tx_entries_per_subcrq =
3346 adapter->max_tx_entries_per_subcrq;
3348 if (adapter->desired.rx_entries)
3349 adapter->req_rx_add_entries_per_subcrq =
3350 adapter->desired.rx_entries;
else
adapter->req_rx_add_entries_per_subcrq =
3353 adapter->max_rx_add_entries_per_subcrq;
3355 if (adapter->desired.tx_queues)
3356 adapter->req_tx_queues =
3357 adapter->desired.tx_queues;
else
adapter->req_tx_queues =
3360 adapter->opt_tx_comp_sub_queues;
3362 if (adapter->desired.rx_queues)
3363 adapter->req_rx_queues =
3364 adapter->desired.rx_queues;
else
adapter->req_rx_queues =
3367 adapter->opt_rx_comp_queues;
3369 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3372 memset(&crq, 0, sizeof(crq));
3373 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3374 crq.request_capability.cmd = REQUEST_CAPABILITY;
3376 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3377 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3378 atomic_inc(&adapter->running_cap_crqs);
3379 ibmvnic_send_crq(adapter, &crq);
3381 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3382 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3383 atomic_inc(&adapter->running_cap_crqs);
3384 ibmvnic_send_crq(adapter, &crq);
3386 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3387 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3388 atomic_inc(&adapter->running_cap_crqs);
3389 ibmvnic_send_crq(adapter, &crq);
3391 crq.request_capability.capability =
3392 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3393 crq.request_capability.number =
3394 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3395 atomic_inc(&adapter->running_cap_crqs);
3396 ibmvnic_send_crq(adapter, &crq);
3398 crq.request_capability.capability =
3399 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3400 crq.request_capability.number =
3401 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3402 atomic_inc(&adapter->running_cap_crqs);
3403 ibmvnic_send_crq(adapter, &crq);
3405 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3406 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3407 atomic_inc(&adapter->running_cap_crqs);
3408 ibmvnic_send_crq(adapter, &crq);
3410 if (adapter->netdev->flags & IFF_PROMISC) {
3411 if (adapter->promisc_supported) {
3412 crq.request_capability.capability =
3413 cpu_to_be16(PROMISC_REQUESTED);
3414 crq.request_capability.number = cpu_to_be64(1);
3415 atomic_inc(&adapter->running_cap_crqs);
3416 ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
3420 cpu_to_be16(PROMISC_REQUESTED);
3421 crq.request_capability.number = cpu_to_be64(0);
3422 atomic_inc(&adapter->running_cap_crqs);
3423 ibmvnic_send_crq(adapter, &crq);
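/* Each REQUEST_CAPABILITY CRQ above bumps running_cap_crqs;
 * handle_request_cap_rsp() decrements it and, once the count drains
 * to zero, moves on to the IP-offload query. A PARTIALSUCCESS
 * response adjusts the rejected value and re-drives this function.
 */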
3427 static int pending_scrq(struct ibmvnic_adapter *adapter,
3428 struct ibmvnic_sub_crq_queue *scrq)
3430 union sub_crq *entry = &scrq->msgs[scrq->cur];
if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
return 1;
else
return 0;
}
3438 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3439 struct ibmvnic_sub_crq_queue *scrq)
3441 union sub_crq *entry;
3442 unsigned long flags;
3444 spin_lock_irqsave(&scrq->lock, flags);
3445 entry = &scrq->msgs[scrq->cur];
3446 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
if (++scrq->cur == scrq->size)
scrq->cur = 0;
} else {
entry = NULL;
}
3452 spin_unlock_irqrestore(&scrq->lock, flags);
3457 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3459 struct ibmvnic_crq_queue *queue = &adapter->crq;
3460 union ibmvnic_crq *crq;
3462 crq = &queue->msgs[queue->cur];
3463 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
if (++queue->cur == queue->size)
queue->cur = 0;
} else {
crq = NULL;
}

return crq;
}
static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
switch (rc) {
case H_PARAMETER:
dev_warn_ratelimited(dev,
"%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
func, rc);
break;
case H_CLOSED:
dev_warn_ratelimited(dev,
"%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
func, rc);
break;
default:
dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
}
}
3492 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3493 union sub_crq *sub_crq)
3495 unsigned int ua = adapter->vdev->unit_address;
3496 struct device *dev = &adapter->vdev->dev;
3497 u64 *u64_crq = (u64 *)sub_crq;
3500 netdev_dbg(adapter->netdev,
3501 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3502 (unsigned long int)cpu_to_be64(remote_handle),
3503 (unsigned long int)cpu_to_be64(u64_crq[0]),
3504 (unsigned long int)cpu_to_be64(u64_crq[1]),
3505 (unsigned long int)cpu_to_be64(u64_crq[2]),
3506 (unsigned long int)cpu_to_be64(u64_crq[3]));
/* Make sure the hypervisor sees the complete request */
mb();
3511 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3512 cpu_to_be64(remote_handle),
3513 cpu_to_be64(u64_crq[0]),
3514 cpu_to_be64(u64_crq[1]),
3515 cpu_to_be64(u64_crq[2]),
3516 cpu_to_be64(u64_crq[3]));
if (rc)
print_subcrq_error(dev, rc, __func__);

return rc;
}
3524 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3525 u64 remote_handle, u64 ioba, u64 num_entries)
3527 unsigned int ua = adapter->vdev->unit_address;
3528 struct device *dev = &adapter->vdev->dev;
/* Make sure the hypervisor sees the complete request */
mb();
3533 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
cpu_to_be64(remote_handle),
ioba, num_entries);
if (rc)
print_subcrq_error(dev, rc, __func__);

return rc;
}
3543 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3544 union ibmvnic_crq *crq)
3546 unsigned int ua = adapter->vdev->unit_address;
3547 struct device *dev = &adapter->vdev->dev;
3548 u64 *u64_crq = (u64 *)crq;
3551 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3552 (unsigned long int)cpu_to_be64(u64_crq[0]),
3553 (unsigned long int)cpu_to_be64(u64_crq[1]));
3555 if (!adapter->crq.active &&
3556 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3557 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
/* Make sure the hypervisor sees the complete request */
mb();
3564 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3565 cpu_to_be64(u64_crq[0]),
3566 cpu_to_be64(u64_crq[1]));
3569 if (rc == H_CLOSED) {
3570 dev_warn(dev, "CRQ Queue closed\n");
3571 if (test_bit(0, &adapter->resetting))
3572 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3575 dev_warn(dev, "Send error (rc=%d)\n", rc);
3581 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3583 union ibmvnic_crq crq;
3585 memset(&crq, 0, sizeof(crq));
3586 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3587 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3588 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3590 return ibmvnic_send_crq(adapter, &crq);
3593 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3595 union ibmvnic_crq crq;
3597 memset(&crq, 0, sizeof(crq));
3598 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3599 crq.version_exchange.cmd = VERSION_EXCHANGE;
3600 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3602 return ibmvnic_send_crq(adapter, &crq);
struct vnic_login_client_data {
u8 type;
__be16 len;
char name[];
} __packed;
3611 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3615 /* Calculate the amount of buffer space needed for the
3616 * vnic client data in the login buffer. There are four entries,
* OS name, LPAR name, device name, and a null last entry.
*/
3619 len = 4 * sizeof(struct vnic_login_client_data);
3620 len += 6; /* "Linux" plus NULL */
3621 len += strlen(utsname()->nodename) + 1;
len += strlen(adapter->netdev->name) + 1;

return len;
}
3627 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3628 struct vnic_login_client_data *vlcd)
3630 const char *os_name = "Linux";
3633 /* Type 1 - LPAR OS */
vlcd->type = 1;
len = strlen(os_name) + 1;
3636 vlcd->len = cpu_to_be16(len);
3637 strncpy(vlcd->name, os_name, len);
3638 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3640 /* Type 2 - LPAR name */
vlcd->type = 2;
len = strlen(utsname()->nodename) + 1;
3643 vlcd->len = cpu_to_be16(len);
3644 strncpy(vlcd->name, utsname()->nodename, len);
3645 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3647 /* Type 3 - device name */
vlcd->type = 3;
len = strlen(adapter->netdev->name) + 1;
3650 vlcd->len = cpu_to_be16(len);
3651 strncpy(vlcd->name, adapter->netdev->name, len);
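/* Illustrative layout (inferred from the fields above): the client
 * data is a packed type/len/name TLV stream, e.g.
 *   [type=1 len=6 "Linux\0"][type=2 len=N "<nodename>\0"]
 *   [type=3 len=M "<netdev name>\0"]
 * where each len counts the terminating NUL.
 */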
3654 static int send_login(struct ibmvnic_adapter *adapter)
3656 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3657 struct ibmvnic_login_buffer *login_buffer;
3658 struct device *dev = &adapter->vdev->dev;
3659 dma_addr_t rsp_buffer_token;
3660 dma_addr_t buffer_token;
3661 size_t rsp_buffer_size;
3662 union ibmvnic_crq crq;
3666 int client_data_len;
3667 struct vnic_login_client_data *vlcd;
3670 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3671 netdev_err(adapter->netdev,
3672 "RX or TX queues are not allocated, device login failed\n");
3676 release_login_rsp_buffer(adapter);
3677 client_data_len = vnic_client_data_len(adapter);
buffer_size = sizeof(struct ibmvnic_login_buffer) +
sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
client_data_len;
3684 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
if (!login_buffer)
goto buf_alloc_failed;
3688 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3690 if (dma_mapping_error(dev, buffer_token)) {
3691 dev_err(dev, "Couldn't map login buffer\n");
3692 goto buf_map_failed;
3695 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3696 sizeof(u64) * adapter->req_tx_queues +
3697 sizeof(u64) * adapter->req_rx_queues +
3698 sizeof(u64) * adapter->req_rx_queues +
3699 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3701 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3702 if (!login_rsp_buffer)
3703 goto buf_rsp_alloc_failed;
3705 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3706 rsp_buffer_size, DMA_FROM_DEVICE);
3707 if (dma_mapping_error(dev, rsp_buffer_token)) {
3708 dev_err(dev, "Couldn't map login rsp buffer\n");
3709 goto buf_rsp_map_failed;
3712 adapter->login_buf = login_buffer;
3713 adapter->login_buf_token = buffer_token;
3714 adapter->login_buf_sz = buffer_size;
3715 adapter->login_rsp_buf = login_rsp_buffer;
3716 adapter->login_rsp_buf_token = rsp_buffer_token;
3717 adapter->login_rsp_buf_sz = rsp_buffer_size;
3719 login_buffer->len = cpu_to_be32(buffer_size);
3720 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3721 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3722 login_buffer->off_txcomp_subcrqs =
3723 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3724 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3725 login_buffer->off_rxcomp_subcrqs =
3726 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3727 sizeof(u64) * adapter->req_tx_queues);
3728 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3729 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3731 tx_list_p = (__be64 *)((char *)login_buffer +
3732 sizeof(struct ibmvnic_login_buffer));
3733 rx_list_p = (__be64 *)((char *)login_buffer +
3734 sizeof(struct ibmvnic_login_buffer) +
3735 sizeof(u64) * adapter->req_tx_queues);
3737 for (i = 0; i < adapter->req_tx_queues; i++) {
3738 if (adapter->tx_scrq[i]) {
tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->crq_num);
}
}
3744 for (i = 0; i < adapter->req_rx_queues; i++) {
3745 if (adapter->rx_scrq[i]) {
rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->crq_num);
}
}
3751 /* Insert vNIC login client data */
3752 vlcd = (struct vnic_login_client_data *)
3753 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3754 login_buffer->client_data_offset =
3755 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3756 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3758 vnic_add_client_data(adapter, vlcd);
3760 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3761 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3762 netdev_dbg(adapter->netdev, "%016lx\n",
3763 ((unsigned long int *)(adapter->login_buf))[i]);
3766 memset(&crq, 0, sizeof(crq));
3767 crq.login.first = IBMVNIC_CRQ_CMD;
3768 crq.login.cmd = LOGIN;
3769 crq.login.ioba = cpu_to_be32(buffer_token);
3770 crq.login.len = cpu_to_be32(buffer_size);
ibmvnic_send_crq(adapter, &crq);

return 0;

buf_rsp_map_failed:
kfree(login_rsp_buffer);
3777 buf_rsp_alloc_failed:
3778 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
kfree(login_buffer);
buf_alloc_failed:
return -1;
}
3785 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3788 union ibmvnic_crq crq;
3790 memset(&crq, 0, sizeof(crq));
3791 crq.request_map.first = IBMVNIC_CRQ_CMD;
3792 crq.request_map.cmd = REQUEST_MAP;
3793 crq.request_map.map_id = map_id;
3794 crq.request_map.ioba = cpu_to_be32(addr);
3795 crq.request_map.len = cpu_to_be32(len);
3796 return ibmvnic_send_crq(adapter, &crq);
3799 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3801 union ibmvnic_crq crq;
3803 memset(&crq, 0, sizeof(crq));
3804 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3805 crq.request_unmap.cmd = REQUEST_UNMAP;
3806 crq.request_unmap.map_id = map_id;
3807 return ibmvnic_send_crq(adapter, &crq);
3810 static void send_map_query(struct ibmvnic_adapter *adapter)
3812 union ibmvnic_crq crq;
3814 memset(&crq, 0, sizeof(crq));
3815 crq.query_map.first = IBMVNIC_CRQ_CMD;
3816 crq.query_map.cmd = QUERY_MAP;
3817 ibmvnic_send_crq(adapter, &crq);
3820 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3821 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3823 union ibmvnic_crq crq;
3825 atomic_set(&adapter->running_cap_crqs, 0);
3826 memset(&crq, 0, sizeof(crq));
3827 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3828 crq.query_capability.cmd = QUERY_CAPABILITY;
3830 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3831 atomic_inc(&adapter->running_cap_crqs);
3832 ibmvnic_send_crq(adapter, &crq);
3834 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3835 atomic_inc(&adapter->running_cap_crqs);
3836 ibmvnic_send_crq(adapter, &crq);
3838 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3839 atomic_inc(&adapter->running_cap_crqs);
3840 ibmvnic_send_crq(adapter, &crq);
3842 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3843 atomic_inc(&adapter->running_cap_crqs);
3844 ibmvnic_send_crq(adapter, &crq);
3846 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3847 atomic_inc(&adapter->running_cap_crqs);
3848 ibmvnic_send_crq(adapter, &crq);
3850 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3851 atomic_inc(&adapter->running_cap_crqs);
3852 ibmvnic_send_crq(adapter, &crq);
3854 crq.query_capability.capability =
3855 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3856 atomic_inc(&adapter->running_cap_crqs);
3857 ibmvnic_send_crq(adapter, &crq);
3859 crq.query_capability.capability =
3860 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3861 atomic_inc(&adapter->running_cap_crqs);
3862 ibmvnic_send_crq(adapter, &crq);
3864 crq.query_capability.capability =
3865 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3866 atomic_inc(&adapter->running_cap_crqs);
3867 ibmvnic_send_crq(adapter, &crq);
3869 crq.query_capability.capability =
3870 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3871 atomic_inc(&adapter->running_cap_crqs);
3872 ibmvnic_send_crq(adapter, &crq);
3874 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3875 atomic_inc(&adapter->running_cap_crqs);
3876 ibmvnic_send_crq(adapter, &crq);
3878 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3879 atomic_inc(&adapter->running_cap_crqs);
3880 ibmvnic_send_crq(adapter, &crq);
3882 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3883 atomic_inc(&adapter->running_cap_crqs);
3884 ibmvnic_send_crq(adapter, &crq);
3886 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3887 atomic_inc(&adapter->running_cap_crqs);
3888 ibmvnic_send_crq(adapter, &crq);
3890 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3891 atomic_inc(&adapter->running_cap_crqs);
3892 ibmvnic_send_crq(adapter, &crq);
3894 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3895 atomic_inc(&adapter->running_cap_crqs);
3896 ibmvnic_send_crq(adapter, &crq);
3898 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3899 atomic_inc(&adapter->running_cap_crqs);
3900 ibmvnic_send_crq(adapter, &crq);
3902 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3903 atomic_inc(&adapter->running_cap_crqs);
3904 ibmvnic_send_crq(adapter, &crq);
3906 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3907 atomic_inc(&adapter->running_cap_crqs);
3908 ibmvnic_send_crq(adapter, &crq);
3910 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3911 atomic_inc(&adapter->running_cap_crqs);
3912 ibmvnic_send_crq(adapter, &crq);
3914 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3915 atomic_inc(&adapter->running_cap_crqs);
3916 ibmvnic_send_crq(adapter, &crq);
3918 crq.query_capability.capability =
3919 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3920 atomic_inc(&adapter->running_cap_crqs);
3921 ibmvnic_send_crq(adapter, &crq);
3923 crq.query_capability.capability =
3924 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3925 atomic_inc(&adapter->running_cap_crqs);
3926 ibmvnic_send_crq(adapter, &crq);
3928 crq.query_capability.capability =
3929 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3930 atomic_inc(&adapter->running_cap_crqs);
3931 ibmvnic_send_crq(adapter, &crq);
3933 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3934 atomic_inc(&adapter->running_cap_crqs);
3935 ibmvnic_send_crq(adapter, &crq);
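/* Responses arrive as QUERY_CAPABILITY CRQs and are folded into the
 * adapter fields by handle_query_cap_rsp(); when running_cap_crqs
 * drains back to zero there, the driver proceeds to request its
 * chosen values via ibmvnic_send_req_caps().
 */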
3938 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3939 struct ibmvnic_adapter *adapter)
3941 struct device *dev = &adapter->vdev->dev;
3943 if (crq->get_vpd_size_rsp.rc.code) {
3944 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3945 crq->get_vpd_size_rsp.rc.code);
3946 complete(&adapter->fw_done);
3950 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3951 complete(&adapter->fw_done);
3954 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3955 struct ibmvnic_adapter *adapter)
3957 struct device *dev = &adapter->vdev->dev;
3958 unsigned char *substr = NULL;
3959 u8 fw_level_len = 0;
3961 memset(adapter->fw_version, 0, 32);
3963 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3966 if (crq->get_vpd_rsp.rc.code) {
3967 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3968 crq->get_vpd_rsp.rc.code);
3972 /* get the position of the firmware version info
* located after the ASCII 'RM' substring in the buffer
*/
3975 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3977 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3981 /* get length of firmware level ASCII substring */
3982 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
fw_level_len = *(substr + 2);
} else {
dev_info(dev, "Length of FW level string runs past the end of the VPD buffer\n");
goto complete;
}
3989 /* copy firmware version string from vpd into adapter */
3990 if ((substr + 3 + fw_level_len) <
3991 (adapter->vpd->buff + adapter->vpd->len)) {
3992 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3994 dev_info(dev, "FW substr extrapolated VPD buff\n");
3998 if (adapter->fw_version[0] == '\0')
3999 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4000 complete(&adapter->fw_done);
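/* Assumed VPD layout, as parsed above: ... 'R' 'M' <length byte>
 * <that many ASCII chars of FW level> ...; both the length byte and
 * the string itself are bounds-checked against vpd->len before use.
 */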
4003 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4005 struct device *dev = &adapter->vdev->dev;
4006 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4007 netdev_features_t old_hw_features = 0;
4008 union ibmvnic_crq crq;
4011 dma_unmap_single(dev, adapter->ip_offload_tok,
4012 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4014 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4015 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4016 netdev_dbg(adapter->netdev, "%016lx\n",
4017 ((unsigned long int *)(buf))[i]);
4019 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4020 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4021 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4022 buf->tcp_ipv4_chksum);
4023 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4024 buf->tcp_ipv6_chksum);
4025 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4026 buf->udp_ipv4_chksum);
4027 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4028 buf->udp_ipv6_chksum);
4029 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4030 buf->large_tx_ipv4);
4031 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4032 buf->large_tx_ipv6);
4033 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4034 buf->large_rx_ipv4);
4035 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4036 buf->large_rx_ipv6);
4037 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4038 buf->max_ipv4_header_size);
4039 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4040 buf->max_ipv6_header_size);
4041 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4042 buf->max_tcp_header_size);
4043 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4044 buf->max_udp_header_size);
4045 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4046 buf->max_large_tx_size);
4047 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4048 buf->max_large_rx_size);
4049 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4050 buf->ipv6_extension_header);
4051 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4052 buf->tcp_pseudosum_req);
4053 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4054 buf->num_ipv6_ext_headers);
4055 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4056 buf->off_ipv6_ext_headers);
4058 adapter->ip_offload_ctrl_tok =
4059 dma_map_single(dev, &adapter->ip_offload_ctrl,
4060 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
4062 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4063 dev_err(dev, "Couldn't map ip offload control buffer\n");
4067 adapter->ip_offload_ctrl.len =
4068 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4069 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
4070 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
4071 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
4072 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4073 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
4074 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4075 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
4076 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
4077 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
4079 /* large_rx disabled for now, additional features needed */
4080 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
4081 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
4083 if (adapter->state != VNIC_PROBING) {
4084 old_hw_features = adapter->netdev->hw_features;
4085 adapter->netdev->hw_features = 0;
4088 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4090 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4091 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4093 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4094 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4096 if ((adapter->netdev->features &
4097 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4098 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4100 if (buf->large_tx_ipv4)
4101 adapter->netdev->hw_features |= NETIF_F_TSO;
4102 if (buf->large_tx_ipv6)
4103 adapter->netdev->hw_features |= NETIF_F_TSO6;
4105 if (adapter->state == VNIC_PROBING) {
4106 adapter->netdev->features |= adapter->netdev->hw_features;
4107 } else if (old_hw_features != adapter->netdev->hw_features) {
4108 netdev_features_t tmp = 0;
4110 /* disable features no longer supported */
4111 adapter->netdev->features &= adapter->netdev->hw_features;
4112 /* turn on features now supported if previously enabled */
4113 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4114 adapter->netdev->hw_features;
4115 adapter->netdev->features |=
4116 tmp & adapter->netdev->wanted_features;
4119 memset(&crq, 0, sizeof(crq));
4120 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4121 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4122 crq.control_ip_offload.len =
4123 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4124 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4125 ibmvnic_send_crq(adapter, &crq);
4128 static const char *ibmvnic_fw_err_cause(u16 cause)
{
switch (cause) {
case ADAPTER_PROBLEM:
4132 return "adapter problem";
4134 return "bus problem";
4136 return "firmware problem";
4138 return "device driver problem";
4140 return "EEH recovery";
4142 return "firmware updated";
4144 return "low Memory";
4150 static void handle_error_indication(union ibmvnic_crq *crq,
4151 struct ibmvnic_adapter *adapter)
4153 struct device *dev = &adapter->vdev->dev;
4156 cause = be16_to_cpu(crq->error_indication.error_cause);
4158 dev_warn_ratelimited(dev,
4159 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4160 crq->error_indication.flags
4161 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4162 ibmvnic_fw_err_cause(cause));
4164 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4165 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4167 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4170 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4171 struct ibmvnic_adapter *adapter)
4173 struct net_device *netdev = adapter->netdev;
4174 struct device *dev = &adapter->vdev->dev;
4177 rc = crq->change_mac_addr_rsp.rc.code;
4179 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4182 ether_addr_copy(netdev->dev_addr,
4183 &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
4189 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4190 struct ibmvnic_adapter *adapter)
4192 struct device *dev = &adapter->vdev->dev;
4196 atomic_dec(&adapter->running_cap_crqs);
4197 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
name = "tx";
case REQ_RX_QUEUES:
req_value = &adapter->req_rx_queues;
name = "rx";
4206 case REQ_RX_ADD_QUEUES:
4207 req_value = &adapter->req_rx_add_queues;
4210 case REQ_TX_ENTRIES_PER_SUBCRQ:
4211 req_value = &adapter->req_tx_entries_per_subcrq;
4212 name = "tx_entries_per_subcrq";
4214 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4215 req_value = &adapter->req_rx_add_entries_per_subcrq;
4216 name = "rx_add_entries_per_subcrq";
case REQ_MTU:
req_value = &adapter->req_mtu;
name = "mtu";
4222 case PROMISC_REQUESTED:
4223 req_value = &adapter->promisc;
4227 dev_err(dev, "Got invalid cap request rsp %d\n",
4228 crq->request_capability.capability);
4232 switch (crq->request_capability_rsp.rc.code) {
case SUCCESS:
break;
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.number),
name);

if (be16_to_cpu(crq->request_capability_rsp.capability) ==
REQ_MTU) {
pr_err("mtu of %llu is not supported. Reverting.\n",
*req_value);
*req_value = adapter->fallback.mtu;
} else {
*req_value =
be64_to_cpu(crq->request_capability_rsp.number);
}
4251 ibmvnic_send_req_caps(adapter, 1);
4254 dev_err(dev, "Error %d in request cap rsp\n",
4255 crq->request_capability_rsp.rc.code);
4259 /* Done receiving requested capabilities, query IP offload support */
4260 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4261 union ibmvnic_crq newcrq;
4262 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4263 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4264 &adapter->ip_offload_buf;
4266 adapter->wait_capability = false;
4267 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4271 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4272 if (!firmware_has_feature(FW_FEATURE_CMO))
4273 dev_err(dev, "Couldn't map offload buffer\n");
4277 memset(&newcrq, 0, sizeof(newcrq));
4278 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4279 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4280 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4281 newcrq.query_ip_offload.ioba =
4282 cpu_to_be32(adapter->ip_offload_tok);
4284 ibmvnic_send_crq(adapter, &newcrq);
4288 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4289 struct ibmvnic_adapter *adapter)
4291 struct device *dev = &adapter->vdev->dev;
4292 struct net_device *netdev = adapter->netdev;
4293 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4294 struct ibmvnic_login_buffer *login = adapter->login_buf;
4297 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4299 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4300 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4302 /* If the number of queues requested can't be allocated by the
4303 * server, the login response will return with code 1. We will need
4304 * to resend the login buffer with fewer queues requested.
4306 if (login_rsp_crq->generic.rc.code) {
4307 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4308 complete(&adapter->init_done);
4312 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4314 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4315 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4316 netdev_dbg(adapter->netdev, "%016lx\n",
4317 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4321 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4322 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4323 adapter->req_rx_add_queues !=
4324 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4325 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4326 ibmvnic_remove(adapter->vdev);
4329 release_login_buffer(adapter);
4330 complete(&adapter->init_done);
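/* The cross-check above compares the response with what send_login()
 * requested: the TX completion queue count must match exactly, and
 * the RX-add queue count must equal the RX completion count times
 * req_rx_add_queues. A mismatch means driver and server state have
 * diverged beyond repair, hence ibmvnic_remove().
 */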
4335 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4336 struct ibmvnic_adapter *adapter)
4338 struct device *dev = &adapter->vdev->dev;
4341 rc = crq->request_unmap_rsp.rc.code;
4343 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4346 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4347 struct ibmvnic_adapter *adapter)
4349 struct net_device *netdev = adapter->netdev;
4350 struct device *dev = &adapter->vdev->dev;
4353 rc = crq->query_map_rsp.rc.code;
4355 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4358 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4359 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4360 crq->query_map_rsp.free_pages);
4363 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4364 struct ibmvnic_adapter *adapter)
4366 struct net_device *netdev = adapter->netdev;
4367 struct device *dev = &adapter->vdev->dev;
4370 atomic_dec(&adapter->running_cap_crqs);
4371 netdev_dbg(netdev, "Outstanding queries: %d\n",
4372 atomic_read(&adapter->running_cap_crqs));
4373 rc = crq->query_capability.rc.code;
4375 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4379 switch (be16_to_cpu(crq->query_capability.capability)) {
case MIN_TX_QUEUES:
adapter->min_tx_queues =
4382 be64_to_cpu(crq->query_capability.number);
4383 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4384 adapter->min_tx_queues);
case MIN_RX_QUEUES:
adapter->min_rx_queues =
4388 be64_to_cpu(crq->query_capability.number);
4389 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4390 adapter->min_rx_queues);
4392 case MIN_RX_ADD_QUEUES:
4393 adapter->min_rx_add_queues =
4394 be64_to_cpu(crq->query_capability.number);
4395 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4396 adapter->min_rx_add_queues);
case MAX_TX_QUEUES:
adapter->max_tx_queues =
4400 be64_to_cpu(crq->query_capability.number);
4401 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4402 adapter->max_tx_queues);
case MAX_RX_QUEUES:
adapter->max_rx_queues =
4406 be64_to_cpu(crq->query_capability.number);
4407 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4408 adapter->max_rx_queues);
4410 case MAX_RX_ADD_QUEUES:
4411 adapter->max_rx_add_queues =
4412 be64_to_cpu(crq->query_capability.number);
4413 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4414 adapter->max_rx_add_queues);
4416 case MIN_TX_ENTRIES_PER_SUBCRQ:
4417 adapter->min_tx_entries_per_subcrq =
4418 be64_to_cpu(crq->query_capability.number);
4419 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4420 adapter->min_tx_entries_per_subcrq);
4422 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4423 adapter->min_rx_add_entries_per_subcrq =
4424 be64_to_cpu(crq->query_capability.number);
4425 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4426 adapter->min_rx_add_entries_per_subcrq);
4428 case MAX_TX_ENTRIES_PER_SUBCRQ:
4429 adapter->max_tx_entries_per_subcrq =
4430 be64_to_cpu(crq->query_capability.number);
4431 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4432 adapter->max_tx_entries_per_subcrq);
4434 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4435 adapter->max_rx_add_entries_per_subcrq =
4436 be64_to_cpu(crq->query_capability.number);
4437 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4438 adapter->max_rx_add_entries_per_subcrq);
4440 case TCP_IP_OFFLOAD:
4441 adapter->tcp_ip_offload =
4442 be64_to_cpu(crq->query_capability.number);
4443 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4444 adapter->tcp_ip_offload);
4446 case PROMISC_SUPPORTED:
4447 adapter->promisc_supported =
4448 be64_to_cpu(crq->query_capability.number);
4449 netdev_dbg(netdev, "promisc_supported = %lld\n",
4450 adapter->promisc_supported);
4453 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4454 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4455 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4458 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4459 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4460 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4462 case MAX_MULTICAST_FILTERS:
4463 adapter->max_multicast_filters =
4464 be64_to_cpu(crq->query_capability.number);
4465 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4466 adapter->max_multicast_filters);
4468 case VLAN_HEADER_INSERTION:
4469 adapter->vlan_header_insertion =
4470 be64_to_cpu(crq->query_capability.number);
4471 if (adapter->vlan_header_insertion)
4472 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4473 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4474 adapter->vlan_header_insertion);
4476 case RX_VLAN_HEADER_INSERTION:
4477 adapter->rx_vlan_header_insertion =
4478 be64_to_cpu(crq->query_capability.number);
4479 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4480 adapter->rx_vlan_header_insertion);
4482 case MAX_TX_SG_ENTRIES:
4483 adapter->max_tx_sg_entries =
4484 be64_to_cpu(crq->query_capability.number);
4485 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4486 adapter->max_tx_sg_entries);
4488 case RX_SG_SUPPORTED:
4489 adapter->rx_sg_supported =
4490 be64_to_cpu(crq->query_capability.number);
4491 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4492 adapter->rx_sg_supported);
4494 case OPT_TX_COMP_SUB_QUEUES:
4495 adapter->opt_tx_comp_sub_queues =
4496 be64_to_cpu(crq->query_capability.number);
4497 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4498 adapter->opt_tx_comp_sub_queues);
4500 case OPT_RX_COMP_QUEUES:
4501 adapter->opt_rx_comp_queues =
4502 be64_to_cpu(crq->query_capability.number);
4503 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4504 adapter->opt_rx_comp_queues);
4506 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4507 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4508 be64_to_cpu(crq->query_capability.number);
4509 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4510 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4512 case OPT_TX_ENTRIES_PER_SUBCRQ:
4513 adapter->opt_tx_entries_per_subcrq =
4514 be64_to_cpu(crq->query_capability.number);
4515 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4516 adapter->opt_tx_entries_per_subcrq);
4518 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4519 adapter->opt_rxba_entries_per_subcrq =
4520 be64_to_cpu(crq->query_capability.number);
4521 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4522 adapter->opt_rxba_entries_per_subcrq);
4524 case TX_RX_DESC_REQ:
4525 adapter->tx_rx_desc_req = crq->query_capability.number;
4526 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4527 adapter->tx_rx_desc_req);
4531 netdev_err(netdev, "Got invalid cap rsp %d\n",
4532 crq->query_capability.capability);
4536 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4537 adapter->wait_capability = false;
4538 ibmvnic_send_req_caps(adapter, 0);
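
/* Minimal sketch (hypothetical helper): the min/max values cached above
 * bound the subsequent REQUEST_CAPABILITY negotiation, so a desired queue
 * count would be clamped to the server's limits before being requested
 * via ibmvnic_send_req_caps().
 */
static u64 __maybe_unused example_clamp_req_tx_queues(struct ibmvnic_adapter *adapter,
						      u64 desired)
{
	return clamp_t(u64, desired, adapter->min_tx_queues,
		       adapter->max_tx_queues);
}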

static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
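
/* Usage sketch (hypothetical caller): send_query_phys_parms() follows the
 * driver's synchronous command pattern: take fw_lock, reinit fw_done, send
 * the CRQ, then sleep until the QUERY_PHYS_PARMS_RSP handler below
 * completes fw_done and records a return code.
 */
static void __maybe_unused example_refresh_link_params(struct ibmvnic_adapter *adapter)
{
	if (send_query_phys_parms(adapter))
		netdev_dbg(adapter->netdev,
			   "PHYS_PARMS query failed, keeping cached speed/duplex\n");
}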

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
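
/* Sketch of how the speed/duplex cached above are typically consumed by an
 * ethtool get_link_ksettings handler (hypothetical example, not this
 * driver's handler):
 */
static void __maybe_unused example_fill_ksettings(struct ibmvnic_adapter *adapter,
						  struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
}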

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
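
/* The setup above is torn down in reverse by release_crq_queue():
 * H_FREE_CRQ, then dma_unmap_single(), then free_page(). A minimal sketch
 * of the firmware-call retry convention used throughout this file (H_BUSY
 * and the long-busy range both mean "try again"):
 */
static void __maybe_unused example_free_crq(struct vio_dev *vdev)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
}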

static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);
	if (adapter->state == VNIC_RESETTING) {
		spin_unlock_irqrestore(&adapter->state_lock, flags);
		return -EBUSY;
	}

	adapter->state = VNIC_REMOVING;
	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
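
/* Usage note: the failover attribute is write-only and accepts only "1".
 * From userspace (the exact sysfs path is illustrative and depends on the
 * vio unit address):
 *
 *   echo 1 > /sys/bus/vio/devices/30000003/failover
 */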

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
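
/* Devices are matched against the device tree: a vNIC presented by the
 * hypervisor appears as a node of type "network" with a compatible property
 * of "IBM,vnic". Illustrative node (the node name and unit address below
 * are assumptions):
 *
 *   vnic@30000003 {
 *           device_type = "network";
 *           compatible = "IBM,vnic";
 *   };
 */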

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);