// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is ready for */
/* transmission or that a buffer has been added to receive a packet.      */
/* Subsequently, sCRQs are used by the server to notify the driver that a */
/* packet transmission has been completed or that a packet has been       */
/* received and placed in a waiting buffer.                               */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA-mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/**************************************************************************/

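/* A rough sketch of the long term mapping flow implemented below (all
 * names refer to functions in this file): alloc_ltb_set() ->
 * alloc_long_term_buff() obtains one large buffer via
 * dma_alloc_coherent() and registers it with the VNIC server through
 * send_request_map(); replenish_rx_pool() and the transmit path then
 * copy skb data to offsets inside an LTB and describe them to the
 * server with sub-CRQ descriptors; free_ltb_set() ->
 * free_long_term_buff() issues send_request_unmap() (unless the reset
 * type implies the server already unmapped it) and frees the DMA
 * memory.
 */
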
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;

static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
static void flush_reset_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

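/* For example, combining the two macros above:
 *
 *	IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 *
 * computes the byte offset of adapter->stats.rx_packets from the start
 * of the adapter structure and reads it back as a u64. This is how the
 * ethtool strings in the table below are paired with their values.
 */
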
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *queue)
{
	if (!(queue && queue->irq))
		return;

	cpumask_clear(queue->affinity_mask);

	if (irq_set_affinity_and_hint(queue->irq, NULL))
		netdev_warn(adapter->netdev,
			    "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
			    __func__, queue, queue->irq);
}

static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs;
	struct ibmvnic_sub_crq_queue **txqs;
	int num_rxqs, num_txqs;
	int i;

	rxqs = adapter->rx_scrq;
	txqs = adapter->tx_scrq;
	num_txqs = adapter->num_active_tx_scrqs;
	num_rxqs = adapter->num_active_rx_scrqs;

	netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
	if (txqs) {
		for (i = 0; i < num_txqs; i++)
			ibmvnic_clean_queue_affinity(adapter, txqs[i]);
	}
	if (rxqs) {
		for (i = 0; i < num_rxqs; i++)
			ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
	}
}

static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
				      unsigned int *cpu, int *stragglers,
				      int stride)
{
	cpumask_var_t mask;
	int i;
	int rc = 0;

	if (!(queue && queue->irq))
		return rc;

	/* cpumask_var_t is either a pointer or array, allocation works here */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* while we have extra cpu give one extra to this irq */
	if (*stragglers) {
		stride++;
		(*stragglers)--;
	}
	/* atomic write is safer than writing bit by bit directly */
	for (i = 0; i < stride; i++) {
		cpumask_set_cpu(*cpu, mask);
		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
					 nr_cpu_ids, false);
	}
	/* set queue affinity mask */
	cpumask_copy(queue->affinity_mask, mask);
	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
	free_cpumask_var(mask);

	return rc;
}

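/* A worked example of the distribution performed via the helper above:
 * with 16 online CPUs and 6 sub-CRQ queues, ibmvnic_set_affinity()
 * computes stride = 16 / 6 = 2 and stragglers = 16 % 6 = 4, so the
 * first four queues each get three CPUs (stride plus one while
 * stragglers remain) and the last two get two, covering all 16 CPUs
 * with no overlap.
 */
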
/* assumes cpu read lock is held */
static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
	struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
	struct ibmvnic_sub_crq_queue *queue;
	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
	int total_queues, stride, stragglers, i;
	unsigned int num_cpu, cpu;
	bool is_rx_queue;
	int rc = 0;

	netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
	if (!(adapter->rx_scrq && adapter->tx_scrq)) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queues not allocated\n",
			    __func__);
		return;
	}

	total_queues = num_rxqs + num_txqs;
	num_cpu = num_online_cpus();
	/* number of cpu's assigned per irq */
	stride = max_t(int, num_cpu / total_queues, 1);
	/* number of leftover cpu's */
	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	/* next available cpu to assign irq to */
	cpu = cpumask_next(-1, cpu_online_mask);

	for (i = 0; i < total_queues; i++) {
		is_rx_queue = false;
		/* balance core load by alternating rx and tx assignments
		 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
		 */
		if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
			queue = rxqs[i_rxqs++];
			is_rx_queue = true;
		} else {
			queue = txqs[i_txqs++];
		}

		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
						stride);
		if (rc)
			goto out;

		if (!queue || is_rx_queue)
			continue;

		rc = __netif_set_xps_queue(adapter->netdev,
					   cpumask_bits(queue->affinity_mask),
					   i_txqs - 1, XPS_CPUS);
		if (rc)
			netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
				    __func__, i_txqs - 1, rc);
	}

out:
	if (rc) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
			    __func__, queue, queue->irq, rc);
		ibmvnic_clean_affinity(adapter);
	}
}

static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
	ibmvnic_set_affinity(adapter);
	return 0;
}

static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
	ibmvnic_set_affinity(adapter);
	return 0;
}

static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
	ibmvnic_clean_affinity(adapter);
	return 0;
}

static enum cpuhp_state ibmvnic_online;

static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					       &adapter->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
	return ret;
}

static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
{
	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					    &adapter->node_dead);
}

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially LPM or under
 * low memory situations, can cause resets to fail/timeout and the LPAR to
 * lose connectivity. So hold onto the LTB even if we fail to communicate
 * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *	   a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	u64 prev = 0;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		prev = ltb->size;
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
			ltb->map_id, ltb->size, prev);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

/**
 * free_ltb_set - free the given set of long term buffers (LTBs)
 * @adapter: The ibmvnic adapter containing this ltb set
 * @ltb_set: The ltb_set to be freed
 *
 * Free the set of LTBs in the given set.
 */
static void free_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set)
{
	int i;

	for (i = 0; i < ltb_set->num_ltbs; i++)
		free_long_term_buff(adapter, &ltb_set->ltbs[i]);

	kfree(ltb_set->ltbs);
	ltb_set->ltbs = NULL;
	ltb_set->num_ltbs = 0;
}

/**
 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb_set: container object for the set of LTBs
 * @num_buffs: Number of buffers in the LTB
 * @buff_size: Size of each buffer in the LTB
 *
 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 * each. We currently cap the size of each LTB to IBMVNIC_ONE_LTB_SIZE. If
 * the new set needs fewer LTBs than the old set, free the excess LTBs. If
 * the new set needs more than the old set had, allocate the remaining ones.
 * Try to reuse as many LTBs as possible and avoid reallocation.
 *
 * Any changes to this allocation strategy must be reflected in
 * map_rxpool_buf_to_ltb() and map_txpool_buf_to_ltb().
 */
static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
			 int buff_size)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ltb_set old_set;
	struct ibmvnic_ltb_set new_set;
	int rem_size;
	int tot_size;		/* size of all ltbs */
	int ltb_size;		/* size of one ltb */
	int nltbs;
	int rc;
	int n;
	int i;

	dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
		buff_size);

	ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
	tot_size = num_buffs * buff_size;

	if (ltb_size > tot_size)
		ltb_size = tot_size;

	nltbs = tot_size / ltb_size;
	if (tot_size % ltb_size)
		nltbs++;

	old_set = *ltb_set;

	if (old_set.num_ltbs == nltbs) {
		new_set = old_set;
	} else {
		int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);

		new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
		if (!new_set.ltbs)
			return -ENOMEM;

		new_set.num_ltbs = nltbs;

		/* Free any excess ltbs in old set */
		for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
			free_long_term_buff(adapter, &old_set.ltbs[i]);

		/* Copy remaining ltbs to new set. All LTBs except the
		 * last one are of the same size. alloc_long_term_buff()
		 * will realloc if the size changes.
		 */
		n = min(old_set.num_ltbs, new_set.num_ltbs);
		for (i = 0; i < n; i++)
			new_set.ltbs[i] = old_set.ltbs[i];

		/* Any additional ltbs in new set will have NULL ltbs for
		 * now and will be allocated in alloc_long_term_buff().
		 */

		/* We no longer need the old_set so free it. Note that we
		 * may have reused some ltbs from old set and freed excess
		 * ltbs above. So we only need to free the container now
		 * not the LTBs themselves. (i.e. don't free_ltb_set()!)
		 */
		kfree(old_set.ltbs);
		old_set.ltbs = NULL;
		old_set.num_ltbs = 0;

		/* Install the new set. If allocations fail below, we will
		 * retry later and know what size LTBs we need.
		 */
		*ltb_set = new_set;
	}

	i = 0;
	rem_size = tot_size;
	while (rem_size) {
		if (ltb_size > rem_size)
			ltb_size = rem_size;

		rem_size -= ltb_size;

		rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
		if (rc)
			goto out;
		i++;
	}

	WARN_ON(i != new_set.num_ltbs);

	return 0;
out:
	/* We may have allocated one/more LTBs before failing and we
	 * want to try and reuse on next reset. So don't free ltb set.
	 */
	return rc;
}

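/* A worked example of the sizing above, assuming IBMVNIC_ONE_LTB_SIZE
 * is 8 MB: with num_buffs = 1024 and buff_size = 16384, tot_size is
 * 16 MB and ltb_size rounds down to the full 8 MB (16384 divides it
 * evenly), giving nltbs = 2 equal LTBs. With num_buffs = 1100, the
 * ~1.2 MB remainder is carved into a third, shorter LTB on the final
 * pass of the allocation loop.
 */
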
/**
 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
 * @rxpool: The receive buffer pool containing buffer
 * @bufidx: Index of buffer in rxpool
 * @ltbp: (Output) pointer to the long term buffer containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 * pool and its corresponding offset. The walk assumes each LTB may be of
 * a different size; it could be optimized based on the allocation
 * strategy in alloc_ltb_set().
 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON(bufidx >= rxpool->size);

	for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
		ltb = &rxpool->ltb_set.ltbs[i];
		nbufs = ltb->size / rxpool->buff_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * rxpool->buff_size;
}

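/* For example, with two LTBs holding 512 buffers each, bufidx = 700
 * walks past LTB 0 (700 - 512 = 188) and resolves to LTB 1 at offset
 * 188 * buff_size. The tx variant below performs the same walk.
 */
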
/**
 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
 * @txpool: The transmit buffer pool containing buffer
 * @bufidx: Index of buffer in txpool
 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON_ONCE(bufidx >= txpool->num_buffers);

	for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
		ltb = &txpool->ltb_set.ltbs[i];
		nbufs = ltb->size / txpool->buf_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * txpool->buf_size;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int bufidx;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		bufidx = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[bufidx].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
		dst = ltb->buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = ltb->addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[bufidx].data = dst;
		pool->rx_buff[bufidx].dma = dma_addr;
		pool->rx_buff[bufidx].skb = skb;
		pool->rx_buff[bufidx].pool_index = pool->index;
		pool->rx_buff[bufidx].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = ltb->map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		bufidx = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = bufidx;
		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
		pool->rx_buff[bufidx].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

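/* A worked example of the rx_add.len shift above on little endian:
 * buff_size 0x4000 << 8 = 0x400000, and cpu_to_be32(0x400000) lays the
 * bytes out as 00 40 00 00, placing the size in the three high-order
 * bytes that back the 24-bit length field instead of losing its low
 * byte to truncation.
 */
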
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_ltb_set(adapter, &rx_pool->ltb_set);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}

/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
				   rx_pool->size, rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pool() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_ltb_set(adapter, &tx_pool->ltb_set);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -ENOMEM;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	if (old_mtu != new_mtu ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;		/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 *       same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 *       buffers (see the init_one_tx_pool() calls for these).
	 *       For consistency, we use tx_pool->num_buffers and
	 *       tso_pool->num_buffers below.
	 */
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;

		tx_pool = &adapter->tx_pool[i];

		dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
			i, tx_pool->num_buffers, tx_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
				   tx_pool->num_buffers, tx_pool->buf_size);
		if (rc)
			goto out;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];

		dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
			i, tso_pool->num_buffers, tso_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
				   tso_pool->num_buffers, tso_pool->buf_size);
		if (rc)
			goto out;

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

static int ibmvnic_login(struct net_device *netdev)
{
	unsigned long flags, timeout = msecs_to_jiffies(20000);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			adapter->login_pending = false;
			goto partial_reset;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		/* Default/timeout error handling, reset and start fresh */
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);

partial_reset:
			/* adapter login failed, so free any CRQs or sub-CRQs
			 * and register again before attempting to login again.
			 * If we don't do this then the VIOS may think that
			 * we are already logged in and reject any subsequent
			 * attempts
			 */
			netdev_warn(netdev,
				    "Freeing and re-registering CRQs before attempting to login again\n");
			retry = true;
			adapter->init_done_rc = 0;
			release_sub_crqs(adapter, true);
			/* Much of this is similar logic as ibmvnic_probe(),
			 * we are essentially re-initializing communication
			 * with the server. We really should not run any
			 * resets/failovers here because this is already a form
			 * of reset and we do not want parallel resets occurring
			 */
			do {
				reinit_init_done(adapter);
				/* Clear any failovers we got in the previous
				 * pass since we are re-initializing the CRQ
				 */
				adapter->failover_pending = false;
				release_crq_queue(adapter);
				/* If we don't sleep here then we risk an
				 * unnecessary failover event from the VIOS.
				 * This is a known VIOS issue caused by a vnic
				 * device freeing and registering a CRQ too
				 * quickly.
				 */
				msleep(1500);
				/* Avoid any resets, since we are currently
				 * resetting.
				 */
				spin_lock_irqsave(&adapter->rwi_lock, flags);
				flush_reset_queue(adapter);
				spin_unlock_irqrestore(&adapter->rwi_lock,
						       flags);

				rc = init_crq_queue(adapter);
				if (rc) {
					netdev_err(netdev, "login recovery: init CRQ failed %d\n",
						   rc);
					return -EIO;
				}

				rc = ibmvnic_reset_init(adapter, false);
				if (rc)
					netdev_err(netdev, "login recovery: Reset init failed %d\n",
						   rc);
				/* IBMVNIC_CRQ_INIT will return EAGAIN if it
				 * fails, since ibmvnic_reset_init will free
				 * irq's in failure, we won't be able to receive
				 * new CRQs so we need to keep trying. probe()
				 * handles this similarly.
				 */
			} while (rc == -EAGAIN && retry_count++ < retries);
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	if (!adapter->login_buf)
		return;

	dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
			 adapter->login_buf_sz, DMA_TO_DEVICE);
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	if (!adapter->login_rsp_buf)
		return;

	dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		/* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
		 * resets, don't reset the stats because there could be batched
		 * skb's waiting to be sent. If we reset dql stats, we risk
		 * num_completed being greater than num_queued. This will cause
		 * a BUG_ON in dql_completed().
		 */
		if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
			netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		ibmvnic_napi_disable(adapter);
		ibmvnic_disable_irqs(adapter);
		return rc;
	}

	adapter->tx_queues_active = true;

	/* Since queues were stopped until now, there shouldn't be any
	 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
	 * don't need the synchronize_rcu()? Leaving it for consistency
	 * with setting ->tx_queues_active = false.
	 */
	synchronize_rcu();

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}

	if (rc) {
		release_resources(adapter);
		release_rx_pools(adapter);
		release_tx_pools(adapter);
	}

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */

	adapter->tx_queues_active = false;

	/* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
	 * update so they don't restart a queue after we stop it below.
	 */
	synchronize_rcu();

	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	clean_rx_pools(adapter);
	clean_tx_pools(adapter);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}

	return len;
}

2203 * create_hdr_descs - create header and header extension descriptors
2204 * @hdr_field: bitfield determining needed headers
2205 * @hdr_data: buffer containing header data
2206 * @len: length of data buffer
2207 * @hdr_len: array of individual header lengths
2208 * @scrq_arr: descriptor array
2210 * Creates header and, if needed, header extension descriptors and
2211 * places them in a descriptor array, scrq_arr
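 *
 * Worked example (editorial note): the first header descriptor carries up
 * to 24 bytes of header data and each header extension descriptor up to
 * 29, so the 54-byte TCP/IPv4 header set above needs three descriptors:
 * 54 = 24 + 29 + 1.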
2214 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
2215 union sub_crq *scrq_arr)
2217 union sub_crq hdr_desc;
2223 while (tmp_len > 0) {
2224 cur = hdr_data + len - tmp_len;
2226 memset(&hdr_desc, 0, sizeof(hdr_desc));
2227 if (cur != hdr_data) {
2228 data = hdr_desc.hdr_ext.data;
2229 tmp = tmp_len > 29 ? 29 : tmp_len;
2230 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
2231 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
2232 hdr_desc.hdr_ext.len = tmp;
2234 data = hdr_desc.hdr.data;
2235 tmp = tmp_len > 24 ? 24 : tmp_len;
2236 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
2237 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
2238 hdr_desc.hdr.len = tmp;
2239 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
2240 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
2241 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
2242 hdr_desc.hdr.flag = hdr_field << 1;
2244 memcpy(data, cur, tmp);
2246 *scrq_arr = hdr_desc;
2255 * build_hdr_descs_arr - build a header descriptor array
2256 * @skb: tx socket buffer
2257 * @indir_arr: indirect array
2258 * @num_entries: number of descriptors to be sent
2259 * @hdr_field: bit field determining which headers will be sent
2261 * This function will build a TX descriptor array with applicable
2262 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
2265 static void build_hdr_descs_arr(struct sk_buff *skb,
2266 union sub_crq *indir_arr,
2267 int *num_entries, u8 hdr_field)
2269 int hdr_len[3] = {0, 0, 0};
2270 u8 hdr_data[140] = {0};
2273 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
2275 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
2279 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
2280 struct net_device *netdev)
2282 /* For some backing devices, mishandling of small packets
2283 * can result in a loss of connection or TX stall. Device
2284 * architects recommend that no packet should be smaller
2285 * than the minimum MTU value provided to the driver, so
2286 * pad any packets to that length
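 * (editorial example: with a min_mtu of, say, 64, a 42-byte ARP frame
 * would be zero-padded to 64 bytes by skb_put_padto() below, while
 * frames already at or above min_mtu pass through unchanged)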
2288 if (skb->len < netdev->min_mtu)
2289 return skb_put_padto(skb, netdev->min_mtu);
2294 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
2295 struct ibmvnic_sub_crq_queue *tx_scrq)
2297 struct ibmvnic_ind_xmit_queue *ind_bufp;
2298 struct ibmvnic_tx_buff *tx_buff;
2299 struct ibmvnic_tx_pool *tx_pool;
2300 union sub_crq tx_scrq_entry;
2306 ind_bufp = &tx_scrq->ind_buf;
2307 entries = (u64)ind_bufp->index;
2308 queue_num = tx_scrq->pool_index;
2310 for (i = entries - 1; i >= 0; --i) {
2311 tx_scrq_entry = ind_bufp->indir_arr[i];
2312 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
2314 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
2315 if (index & IBMVNIC_TSO_POOL_MASK) {
2316 tx_pool = &adapter->tso_pool[queue_num];
2317 index &= ~IBMVNIC_TSO_POOL_MASK;
2319 tx_pool = &adapter->tx_pool[queue_num];
2321 tx_pool->free_map[tx_pool->consumer_index] = index;
2322 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2323 tx_pool->num_buffers - 1 :
2324 tx_pool->consumer_index - 1;
2325 tx_buff = &tx_pool->tx_buff[index];
2326 adapter->netdev->stats.tx_packets--;
2327 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
2328 adapter->tx_stats_buffers[queue_num].packets--;
2329 adapter->tx_stats_buffers[queue_num].bytes -=
2331 dev_kfree_skb_any(tx_buff->skb);
2332 tx_buff->skb = NULL;
2333 adapter->netdev->stats.tx_dropped++;
2336 ind_bufp->index = 0;
2338 if (atomic_sub_return(entries, &tx_scrq->used) <=
2339 (adapter->req_tx_entries_per_subcrq / 2) &&
2340 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
2343 if (adapter->tx_queues_active) {
2344 netif_wake_subqueue(adapter->netdev, queue_num);
2345 netdev_dbg(adapter->netdev, "Started queue %d\n",
2353 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
2354 struct ibmvnic_sub_crq_queue *tx_scrq)
2356 struct ibmvnic_ind_xmit_queue *ind_bufp;
2362 ind_bufp = &tx_scrq->ind_buf;
2363 dma_addr = (u64)ind_bufp->indir_dma;
2364 entries = (u64)ind_bufp->index;
2365 handle = tx_scrq->handle;
2369 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
2371 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
2373 ind_bufp->index = 0;
2377 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2379 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2380 int queue_num = skb_get_queue_mapping(skb);
2381 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
2382 struct device *dev = &adapter->vdev->dev;
2383 struct ibmvnic_ind_xmit_queue *ind_bufp;
2384 struct ibmvnic_tx_buff *tx_buff = NULL;
2385 struct ibmvnic_sub_crq_queue *tx_scrq;
2386 struct ibmvnic_long_term_buff *ltb;
2387 struct ibmvnic_tx_pool *tx_pool;
2388 unsigned int tx_send_failed = 0;
2389 netdev_tx_t ret = NETDEV_TX_OK;
2390 unsigned int tx_map_failed = 0;
2391 union sub_crq indir_arr[16];
2392 unsigned int tx_dropped = 0;
2393 unsigned int tx_packets = 0;
2394 unsigned int tx_bytes = 0;
2395 dma_addr_t data_dma_addr;
2396 struct netdev_queue *txq;
2397 unsigned long lpar_rc;
2398 union sub_crq tx_crq;
2399 unsigned int offset;
2400 int num_entries = 1;
2405 /* If a reset is in progress, drop the packet since
2406 * the scrqs may get torn down. Otherwise use the
2407 * RCU read lock to ensure the reset waits for us to complete.
2410 if (!adapter->tx_queues_active) {
2411 dev_kfree_skb_any(skb);
2419 tx_scrq = adapter->tx_scrq[queue_num];
2420 txq = netdev_get_tx_queue(netdev, queue_num);
2421 ind_bufp = &tx_scrq->ind_buf;
2423 if (ibmvnic_xmit_workarounds(skb, netdev)) {
2427 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2431 if (skb_is_gso(skb))
2432 tx_pool = &adapter->tso_pool[queue_num];
2434 tx_pool = &adapter->tx_pool[queue_num];
2436 bufidx = tx_pool->free_map[tx_pool->consumer_index];
2438 if (bufidx == IBMVNIC_INVALID_MAP) {
2439 dev_kfree_skb_any(skb);
2442 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2447 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
2449 map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
2451 dst = ltb->buff + offset;
2452 memset(dst, 0, tx_pool->buf_size);
2453 data_dma_addr = ltb->addr + offset;
2455 if (skb_shinfo(skb)->nr_frags) {
2459 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
2460 cur = skb_headlen(skb);
2462 /* Copy the frags */
2463 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2464 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2466 memcpy(dst + cur, skb_frag_address(frag),
2467 skb_frag_size(frag));
2468 cur += skb_frag_size(frag);
2471 skb_copy_from_linear_data(skb, dst, skb->len);
2474 /* post changes to long_term_buff *dst before VIOS accesses it */
2477 tx_pool->consumer_index =
2478 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
2480 tx_buff = &tx_pool->tx_buff[bufidx];
2482 tx_buff->index = bufidx;
2483 tx_buff->pool_index = queue_num;
2485 memset(&tx_crq, 0, sizeof(tx_crq));
2486 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
2487 tx_crq.v1.type = IBMVNIC_TX_DESC;
2488 tx_crq.v1.n_crq_elem = 1;
2489 tx_crq.v1.n_sge = 1;
2490 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
2492 if (skb_is_gso(skb))
2493 tx_crq.v1.correlator =
2494 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
2496 tx_crq.v1.correlator = cpu_to_be32(bufidx);
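/* Editorial note: the correlator travels back in the TX completion (see
 * ibmvnic_complete_tx()); IBMVNIC_TSO_POOL_MASK set in the correlator
 * tells the completion path whether the buffer index refers to the
 * per-queue tx_pool or tso_pool.
 */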
2497 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
2498 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
2499 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
2501 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
2502 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
2503 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
2506 if (skb->protocol == htons(ETH_P_IP)) {
2507 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2508 proto = ip_hdr(skb)->protocol;
2509 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2510 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2511 proto = ipv6_hdr(skb)->nexthdr;
2514 if (proto == IPPROTO_TCP)
2515 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2516 else if (proto == IPPROTO_UDP)
2517 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2519 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2520 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
2523 if (skb_is_gso(skb)) {
2524 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2525 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2529 if ((*hdrs >> 7) & 1)
2530 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
2532 tx_crq.v1.n_crq_elem = num_entries;
2533 tx_buff->num_entries = num_entries;
2534 /* flush buffer if current entry cannot fit */
2535 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2536 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2537 if (lpar_rc != H_SUCCESS)
2541 indir_arr[0] = tx_crq;
2542 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
2543 num_entries * sizeof(struct ibmvnic_generic_scrq));
2544 ind_bufp->index += num_entries;
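/* Editorial note: descriptors accumulate in ind_bufp and are pushed to
 * the hypervisor in one indirect sub-CRQ hcall (see
 * send_subcrq_indirect()), either when the buffer reaches
 * IBMVNIC_MAX_IND_DESCS or when the stack signals that no more skbs are
 * coming (!netdev_xmit_more()).
 */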
2545 if (__netdev_tx_sent_queue(txq, skb->len,
2546 netdev_xmit_more() &&
2547 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2548 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2549 if (lpar_rc != H_SUCCESS)
2553 if (atomic_add_return(num_entries, &tx_scrq->used)
2554 >= adapter->req_tx_entries_per_subcrq) {
2555 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
2556 netif_stop_subqueue(netdev, queue_num);
2560 tx_bytes += skb->len;
2561 txq_trans_cond_update(txq);
2566 dev_kfree_skb_any(skb);
2567 tx_buff->skb = NULL;
2568 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2569 tx_pool->num_buffers - 1 :
2570 tx_pool->consumer_index - 1;
2573 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2574 dev_err_ratelimited(dev, "tx: send failed\n");
2576 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2577 /* Disable TX and report carrier off if queue is closed
2578 * or pending failover.
2579 * Firmware guarantees that a signal will be sent to the
2580 * driver, triggering a reset or some other action.
2582 netif_tx_stop_all_queues(netdev);
2583 netif_carrier_off(netdev);
2587 netdev->stats.tx_dropped += tx_dropped;
2588 netdev->stats.tx_bytes += tx_bytes;
2589 netdev->stats.tx_packets += tx_packets;
2590 adapter->tx_send_failed += tx_send_failed;
2591 adapter->tx_map_failed += tx_map_failed;
2592 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2593 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2594 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
2599 static void ibmvnic_set_multi(struct net_device *netdev)
2601 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2602 struct netdev_hw_addr *ha;
2603 union ibmvnic_crq crq;
2605 memset(&crq, 0, sizeof(crq));
2606 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2607 crq.request_capability.cmd = REQUEST_CAPABILITY;
2609 if (netdev->flags & IFF_PROMISC) {
2610 if (!adapter->promisc_supported)
2613 if (netdev->flags & IFF_ALLMULTI) {
2614 /* Accept all multicast */
2615 memset(&crq, 0, sizeof(crq));
2616 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2617 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2618 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2619 ibmvnic_send_crq(adapter, &crq);
2620 } else if (netdev_mc_empty(netdev)) {
2621 /* Reject all multicast */
2622 memset(&crq, 0, sizeof(crq));
2623 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2624 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2625 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2626 ibmvnic_send_crq(adapter, &crq);
2628 /* Accept one or more multicast addresses */
2629 netdev_for_each_mc_addr(ha, netdev) {
2630 memset(&crq, 0, sizeof(crq));
2631 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2632 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2633 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2634 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2636 ibmvnic_send_crq(adapter, &crq);
2642 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
2644 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2645 union ibmvnic_crq crq;
2648 if (!is_valid_ether_addr(dev_addr)) {
2649 rc = -EADDRNOTAVAIL;
2653 memset(&crq, 0, sizeof(crq));
2654 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2655 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
2656 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
2658 mutex_lock(&adapter->fw_lock);
2659 adapter->fw_done_rc = 0;
2660 reinit_completion(&adapter->fw_done);
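/* Editorial note: fw_done is reinitialized under fw_lock before the
 * command is sent so that a stale completion from an earlier firmware
 * exchange cannot satisfy the wait below.
 */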
2662 rc = ibmvnic_send_crq(adapter, &crq);
2665 mutex_unlock(&adapter->fw_lock);
2669 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2670 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
2671 if (rc || adapter->fw_done_rc) {
2673 mutex_unlock(&adapter->fw_lock);
2676 mutex_unlock(&adapter->fw_lock);
2679 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2683 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2685 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2686 struct sockaddr *addr = p;
2690 if (!is_valid_ether_addr(addr->sa_data))
2691 return -EADDRNOTAVAIL;
2693 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2694 if (adapter->state != VNIC_PROBED)
2695 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2700 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2703 case VNIC_RESET_FAILOVER:
2705 case VNIC_RESET_MOBILITY:
2707 case VNIC_RESET_FATAL:
2709 case VNIC_RESET_NON_FATAL:
2711 case VNIC_RESET_TIMEOUT:
2713 case VNIC_RESET_CHANGE_PARAM:
2714 return "CHANGE_PARAM";
2715 case VNIC_RESET_PASSIVE_INIT:
2716 return "PASSIVE_INIT";
2722 * Initialize the init_done completion and return code values. We
2723 * can get a transport event just after registering the CRQ and the
2724 * tasklet will use this to communicate the transport event. To ensure
2725 * we don't miss the notification/error, initialize these _before_
2726 * registering the CRQ.
2728 static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
2730 reinit_completion(&adapter->init_done);
2731 adapter->init_done_rc = 0;
2735 * do_reset returns zero if we are able to keep processing reset events, or
2736 * non-zero if we hit a fatal error and must halt.
2738 static int do_reset(struct ibmvnic_adapter *adapter,
2739 struct ibmvnic_rwi *rwi, u32 reset_state)
2741 struct net_device *netdev = adapter->netdev;
2742 u64 old_num_rx_queues, old_num_tx_queues;
2743 u64 old_num_rx_slots, old_num_tx_slots;
2746 netdev_dbg(adapter->netdev,
2747 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2748 adapter_state_to_string(adapter->state),
2749 adapter->failover_pending,
2750 reset_reason_to_string(rwi->reset_reason),
2751 adapter_state_to_string(reset_state));
2753 adapter->reset_reason = rwi->reset_reason;
2754 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2755 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2758 /* Now that we have the rtnl lock, clear any pending failover.
2759 * This will ensure ibmvnic_open() has either completed or will
2760 * block until failover is complete.
2762 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2763 adapter->failover_pending = false;
2765 /* read the state and check (again) after getting rtnl */
2766 reset_state = adapter->state;
2768 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2773 netif_carrier_off(netdev);
2775 old_num_rx_queues = adapter->req_rx_queues;
2776 old_num_tx_queues = adapter->req_tx_queues;
2777 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2778 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2780 ibmvnic_cleanup(netdev);
2782 if (reset_state == VNIC_OPEN &&
2783 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2784 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2785 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2786 rc = __ibmvnic_close(netdev);
2790 adapter->state = VNIC_CLOSING;
2792 /* Release the RTNL lock before link state change and
2793 * re-acquire after the link state change to allow
2794 * linkwatch_event to grab the RTNL lock and run during
2798 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2803 if (adapter->state == VNIC_OPEN) {
2804 /* When we dropped rtnl, ibmvnic_open() got
2805 * it and noticed that we are resetting and
2806 * set the adapter state to OPEN. Update our
2807 * new "target" state, and resume the reset
2808 * from VNIC_CLOSING state.
2811 "Open changed state from %s, updating.\n",
2812 adapter_state_to_string(reset_state));
2813 reset_state = VNIC_OPEN;
2814 adapter->state = VNIC_CLOSING;
2817 if (adapter->state != VNIC_CLOSING) {
2818 /* If someone else changed the adapter state
2819 * when we dropped the rtnl, fail the reset
2824 adapter->state = VNIC_CLOSED;
2828 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2829 release_resources(adapter);
2830 release_sub_crqs(adapter, 1);
2831 release_crq_queue(adapter);
2834 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2835 /* remove the closed state so when we call open it appears
2836 * we are coming from the probed state.
2838 adapter->state = VNIC_PROBED;
2840 reinit_init_done(adapter);
2842 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2843 rc = init_crq_queue(adapter);
2844 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2845 rc = ibmvnic_reenable_crq_queue(adapter);
2846 release_sub_crqs(adapter, 1);
2848 rc = ibmvnic_reset_crq(adapter);
2849 if (rc == H_CLOSED || rc == H_SUCCESS) {
2850 rc = vio_enable_interrupts(adapter->vdev);
2852 netdev_err(adapter->netdev,
2853 "Reset failed to enable interrupts. rc=%d\n",
2859 netdev_err(adapter->netdev,
2860 "Reset couldn't initialize crq. rc=%d\n", rc);
2864 rc = ibmvnic_reset_init(adapter, true);
2868 /* If the adapter was in PROBE or DOWN state prior to the reset,
2871 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2876 rc = ibmvnic_login(netdev);
2880 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2881 rc = init_resources(adapter);
2884 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2885 adapter->req_tx_queues != old_num_tx_queues ||
2886 adapter->req_rx_add_entries_per_subcrq !=
2888 adapter->req_tx_entries_per_subcrq !=
2890 !adapter->rx_pool ||
2891 !adapter->tso_pool ||
2892 !adapter->tx_pool) {
2893 release_napi(adapter);
2894 release_vpd_data(adapter);
2896 rc = init_resources(adapter);
2901 rc = init_tx_pools(netdev);
2904 "init tx pools failed (%d)\n",
2909 rc = init_rx_pools(netdev);
2912 "init rx pools failed (%d)\n",
2917 ibmvnic_disable_irqs(adapter);
2919 adapter->state = VNIC_CLOSED;
2921 if (reset_state == VNIC_CLOSED) {
2926 rc = __ibmvnic_open(netdev);
2928 rc = IBMVNIC_OPEN_FAILED;
2932 /* refresh device's multicast list */
2933 ibmvnic_set_multi(netdev);
2935 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2936 adapter->reset_reason == VNIC_RESET_MOBILITY)
2937 __netdev_notify_peers(netdev);
2942 /* restore the adapter state if reset failed */
2944 adapter->state = reset_state;
2945 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2946 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2949 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2950 adapter_state_to_string(adapter->state),
2951 adapter->failover_pending, rc);
2955 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2956 struct ibmvnic_rwi *rwi, u32 reset_state)
2958 struct net_device *netdev = adapter->netdev;
2961 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2962 reset_reason_to_string(rwi->reset_reason));
2964 /* read the state and check (again) after getting rtnl */
2965 reset_state = adapter->state;
2967 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2972 netif_carrier_off(netdev);
2973 adapter->reset_reason = rwi->reset_reason;
2975 ibmvnic_cleanup(netdev);
2976 release_resources(adapter);
2977 release_sub_crqs(adapter, 0);
2978 release_crq_queue(adapter);
2980 /* remove the closed state so when we call open it appears
2981 * we are coming from the probed state.
2983 adapter->state = VNIC_PROBED;
2985 reinit_init_done(adapter);
2987 rc = init_crq_queue(adapter);
2989 netdev_err(adapter->netdev,
2990 "Couldn't initialize crq. rc=%d\n", rc);
2994 rc = ibmvnic_reset_init(adapter, false);
2998 /* If the adapter was in PROBE or DOWN state prior to the reset,
3001 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
3004 rc = ibmvnic_login(netdev);
3008 rc = init_resources(adapter);
3012 ibmvnic_disable_irqs(adapter);
3013 adapter->state = VNIC_CLOSED;
3015 if (reset_state == VNIC_CLOSED)
3018 rc = __ibmvnic_open(netdev);
3020 rc = IBMVNIC_OPEN_FAILED;
3024 __netdev_notify_peers(netdev);
3026 /* restore adapter state if reset failed */
3028 adapter->state = reset_state;
3029 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
3030 adapter_state_to_string(adapter->state),
3031 adapter->failover_pending, rc);
3035 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
3037 struct ibmvnic_rwi *rwi;
3038 unsigned long flags;
3040 spin_lock_irqsave(&adapter->rwi_lock, flags);
3042 if (!list_empty(&adapter->rwi_list)) {
3043 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
3045 list_del(&rwi->list);
3050 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3055 * do_passive_init - complete probing when partner device is detected.
3056 * @adapter: ibmvnic_adapter struct
3058 * If the ibmvnic device does not have a partner device to communicate with at boot
3059 * and that partner device comes online at a later time, this function is called
3060 * to complete the initialization process of the ibmvnic device.
3061 * Caller is expected to hold rtnl_lock().
3063 * Returns non-zero if sub-CRQs are not initialized properly, leaving the device
3064 * in the down state.
3065 * Returns 0 upon success, leaving the device in the PROBED state.
3068 static int do_passive_init(struct ibmvnic_adapter *adapter)
3070 unsigned long timeout = msecs_to_jiffies(30000);
3071 struct net_device *netdev = adapter->netdev;
3072 struct device *dev = &adapter->vdev->dev;
3075 netdev_dbg(netdev, "Partner device found, probing.\n");
3077 adapter->state = VNIC_PROBING;
3078 reinit_completion(&adapter->init_done);
3079 adapter->init_done_rc = 0;
3080 adapter->crq.active = true;
3082 rc = send_crq_init_complete(adapter);
3086 rc = send_version_xchg(adapter);
3088 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
3090 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3091 dev_err(dev, "Initialization sequence timed out\n");
3096 rc = init_sub_crqs(adapter);
3098 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
3102 rc = init_sub_crq_irqs(adapter);
3104 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
3108 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3109 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3110 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3112 adapter->state = VNIC_PROBED;
3113 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
3118 release_sub_crqs(adapter, 1);
3120 adapter->state = VNIC_DOWN;
3124 static void __ibmvnic_reset(struct work_struct *work)
3126 struct ibmvnic_adapter *adapter;
3127 unsigned int timeout = 5000;
3128 struct ibmvnic_rwi *tmprwi;
3129 bool saved_state = false;
3130 struct ibmvnic_rwi *rwi;
3131 unsigned long flags;
3138 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
3139 dev = &adapter->vdev->dev;
3141 /* Wait for ibmvnic_probe() to complete. If probe is taking too long
3142 * or if another reset is in progress, defer work for now. If probe
3143 * eventually fails it will flush and terminate our work.
3145 * Three possibilities here:
3146 * 1. Adapter being removed - just return
3147 * 2. Timed out on probe or another reset in progress - delay the work
3148 * 3. Completed probe - perform any resets in queue
3150 if (adapter->state == VNIC_PROBING &&
3151 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
3152 dev_err(dev, "Reset thread timed out on probe");
3153 queue_delayed_work(system_long_wq,
3154 &adapter->ibmvnic_delayed_reset,
3155 IBMVNIC_RESET_DELAY);
3159 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
3160 if (adapter->state == VNIC_REMOVING)
3163 /* ->rwi_list is stable now (no one else is removing entries) */
3165 /* ibmvnic_probe() may have purged the reset queue after we were
3166 * scheduled to process a reset, so there may be no resets to process.
3167 * Before setting the ->resetting bit though, we have to make sure
3168 * that there is in fact a reset to process. Otherwise we may race
3169 * with ibmvnic_open() and end up leaving the vnic down:
3171 * __ibmvnic_reset() ibmvnic_open()
3172 * ----------------- --------------
3174 * set ->resetting bit
3175 * find ->resetting bit is set
3176 * set ->state to IBMVNIC_OPEN (i.e
3177 * assume reset will open device)
3179 * find reset queue empty
3182 * Neither performed vnic login/open and vnic stays down
3184 * If we hold the lock and conditionally set the bit, either we
3185 * or ibmvnic_open() will complete the open.
3188 spin_lock(&adapter->rwi_lock);
3189 if (!list_empty(&adapter->rwi_list)) {
3190 if (test_and_set_bit_lock(0, &adapter->resetting)) {
3191 queue_delayed_work(system_long_wq,
3192 &adapter->ibmvnic_delayed_reset,
3193 IBMVNIC_RESET_DELAY);
3198 spin_unlock(&adapter->rwi_lock);
3203 rwi = get_next_rwi(adapter);
3205 spin_lock_irqsave(&adapter->state_lock, flags);
3207 if (adapter->state == VNIC_REMOVING ||
3208 adapter->state == VNIC_REMOVED) {
3209 spin_unlock_irqrestore(&adapter->state_lock, flags);
3216 reset_state = adapter->state;
3219 spin_unlock_irqrestore(&adapter->state_lock, flags);
3221 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
3223 rc = do_passive_init(adapter);
3226 netif_carrier_on(adapter->netdev);
3227 } else if (adapter->force_reset_recovery) {
3228 /* Since we are doing a hard reset now, clear the
3229 * failover_pending flag so we don't ignore any
3230 * future MOBILITY or other resets.
3232 adapter->failover_pending = false;
3234 /* Transport event occurred during previous reset */
3235 if (adapter->wait_for_reset) {
3236 /* Previous was CHANGE_PARAM; caller locked */
3237 adapter->force_reset_recovery = false;
3238 rc = do_hard_reset(adapter, rwi, reset_state);
3241 adapter->force_reset_recovery = false;
3242 rc = do_hard_reset(adapter, rwi, reset_state);
3250 /* If auto-priority-failover is enabled we can get
3251 * back to back failovers during resets, resulting
3252 * in at least two failed resets (from high-priority
3253 * backing device to low-priority one and then back).
3254 * If resets continue to fail beyond that, give the
3255 * adapter some time to settle down before retrying.
3257 if (num_fails >= 3) {
3258 netdev_dbg(adapter->netdev,
3259 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
3260 adapter_state_to_string(adapter->state),
3262 set_current_state(TASK_UNINTERRUPTIBLE);
3263 schedule_timeout(60 * HZ);
3266 rc = do_reset(adapter, rwi, reset_state);
3269 adapter->last_reset_time = jiffies;
3272 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
3274 rwi = get_next_rwi(adapter);
3277 * If there are no resets queued and the previous reset failed,
3278 * the adapter would be in an undefined state. So retry the
3279 * previous reset as a hard reset.
3281 * Else, free the previous rwi and, if there is another reset
3282 * queued, process the new reset even if previous reset failed
3283 * (the previous reset could have failed because of a fail
3284 * over for instance, so process the fail over).
3291 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
3292 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
3293 adapter->force_reset_recovery = true;
3296 if (adapter->wait_for_reset) {
3297 adapter->reset_done_rc = rc;
3298 complete(&adapter->reset_done);
3301 clear_bit_unlock(0, &adapter->resetting);
3303 netdev_dbg(adapter->netdev,
3304 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
3305 adapter_state_to_string(adapter->state),
3306 adapter->force_reset_recovery,
3307 adapter->wait_for_reset);
3310 static void __ibmvnic_delayed_reset(struct work_struct *work)
3312 struct ibmvnic_adapter *adapter;
3314 adapter = container_of(work, struct ibmvnic_adapter,
3315 ibmvnic_delayed_reset.work);
3316 __ibmvnic_reset(&adapter->ibmvnic_reset);
3319 static void flush_reset_queue(struct ibmvnic_adapter *adapter)
3321 struct list_head *entry, *tmp_entry;
3323 if (!list_empty(&adapter->rwi_list)) {
3324 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
3326 kfree(list_entry(entry, struct ibmvnic_rwi, list));
3331 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
3332 enum ibmvnic_reset_reason reason)
3334 struct net_device *netdev = adapter->netdev;
3335 struct ibmvnic_rwi *rwi, *tmp;
3336 unsigned long flags;
3339 spin_lock_irqsave(&adapter->rwi_lock, flags);
3341 /* If failover is pending don't schedule any other reset.
3342 * Instead let the failover complete. If there is already
3343 * a failover reset scheduled, we will detect and drop the
3344 * duplicate reset when walking the ->rwi_list below.
3346 if (adapter->state == VNIC_REMOVING ||
3347 adapter->state == VNIC_REMOVED ||
3348 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
3350 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
3354 list_for_each_entry(tmp, &adapter->rwi_list, list) {
3355 if (tmp->reset_reason == reason) {
3356 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
3357 reset_reason_to_string(reason));
3363 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
3368 /* if we just received a transport event,
3369 * flush reset queue and process this reset
3371 if (adapter->force_reset_recovery)
3372 flush_reset_queue(adapter);
3374 rwi->reset_reason = reason;
3375 list_add_tail(&rwi->list, &adapter->rwi_list);
3376 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
3377 reset_reason_to_string(reason));
3378 queue_work(system_long_wq, &adapter->ibmvnic_reset);
3382 /* ibmvnic_close() below can block, so drop the lock first */
3383 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3386 ibmvnic_close(netdev);
3391 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
3393 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3395 if (test_bit(0, &adapter->resetting)) {
3396 netdev_err(adapter->netdev,
3397 "Adapter is resetting, skip timeout reset\n");
3400 /* Do not queue another reset until at least 5 seconds (default watchdog timeout) after the last reset */
3403 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
3404 netdev_dbg(dev, "Not yet time to tx timeout.\n");
3407 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
3410 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
3411 struct ibmvnic_rx_buff *rx_buff)
3413 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
3415 rx_buff->skb = NULL;
3417 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
3418 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
3420 atomic_dec(&pool->available);
3423 static int ibmvnic_poll(struct napi_struct *napi, int budget)
3425 struct ibmvnic_sub_crq_queue *rx_scrq;
3426 struct ibmvnic_adapter *adapter;
3427 struct net_device *netdev;
3428 int frames_processed;
3432 adapter = netdev_priv(netdev);
3433 scrq_num = (int)(napi - adapter->napi);
3434 frames_processed = 0;
3435 rx_scrq = adapter->rx_scrq[scrq_num];
3438 while (frames_processed < budget) {
3439 struct sk_buff *skb;
3440 struct ibmvnic_rx_buff *rx_buff;
3441 union sub_crq *next;
3446 if (unlikely(test_bit(0, &adapter->resetting) &&
3447 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
3448 enable_scrq_irq(adapter, rx_scrq);
3449 napi_complete_done(napi, frames_processed);
3450 return frames_processed;
3453 if (!pending_scrq(adapter, rx_scrq))
3455 next = ibmvnic_next_scrq(adapter, rx_scrq);
3456 rx_buff = (struct ibmvnic_rx_buff *)
3457 be64_to_cpu(next->rx_comp.correlator);
3458 /* do error checking */
3459 if (next->rx_comp.rc) {
3460 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
3461 be16_to_cpu(next->rx_comp.rc));
3462 /* free the entry */
3463 next->rx_comp.first = 0;
3464 dev_kfree_skb_any(rx_buff->skb);
3465 remove_buff_from_pool(adapter, rx_buff);
3467 } else if (!rx_buff->skb) {
3468 /* free the entry */
3469 next->rx_comp.first = 0;
3470 remove_buff_from_pool(adapter, rx_buff);
3474 length = be32_to_cpu(next->rx_comp.len);
3475 offset = be16_to_cpu(next->rx_comp.off_frame_data);
3476 flags = next->rx_comp.flags;
3478 /* load long_term_buff before copying to skb */
3480 skb_copy_to_linear_data(skb, rx_buff->data + offset,
3483 /* VLAN Header has been stripped by the system firmware and
3484 * needs to be inserted by the driver
3486 if (adapter->rx_vlan_header_insertion &&
3487 (flags & IBMVNIC_VLAN_STRIPPED))
3488 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3489 ntohs(next->rx_comp.vlan_tci));
3491 /* free the entry */
3492 next->rx_comp.first = 0;
3493 remove_buff_from_pool(adapter, rx_buff);
3495 skb_put(skb, length);
3496 skb->protocol = eth_type_trans(skb, netdev);
3497 skb_record_rx_queue(skb, scrq_num);
3499 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
3500 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
3501 skb->ip_summed = CHECKSUM_UNNECESSARY;
3505 napi_gro_receive(napi, skb); /* send it up */
3506 netdev->stats.rx_packets++;
3507 netdev->stats.rx_bytes += length;
3508 adapter->rx_stats_buffers[scrq_num].packets++;
3509 adapter->rx_stats_buffers[scrq_num].bytes += length;
3513 if (adapter->state != VNIC_CLOSING &&
3514 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
3515 adapter->req_rx_add_entries_per_subcrq / 2) ||
3516 frames_processed < budget))
3517 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
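/* Editorial note: the rearm dance below re-checks pending_scrq() after
 * re-enabling the interrupt; if a completion slipped in between the last
 * poll and enable_scrq_irq(), poll again with the irq masked rather than
 * relying on a new interrupt firing.
 */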
3518 if (frames_processed < budget) {
3519 if (napi_complete_done(napi, frames_processed)) {
3520 enable_scrq_irq(adapter, rx_scrq);
3521 if (pending_scrq(adapter, rx_scrq)) {
3522 if (napi_schedule(napi)) {
3523 disable_scrq_irq(adapter, rx_scrq);
3529 return frames_processed;
3532 static int wait_for_reset(struct ibmvnic_adapter *adapter)
3536 adapter->fallback.mtu = adapter->req_mtu;
3537 adapter->fallback.rx_queues = adapter->req_rx_queues;
3538 adapter->fallback.tx_queues = adapter->req_tx_queues;
3539 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3540 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3542 reinit_completion(&adapter->reset_done);
3543 adapter->wait_for_reset = true;
3544 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3550 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3557 if (adapter->reset_done_rc) {
3559 adapter->desired.mtu = adapter->fallback.mtu;
3560 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3561 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3562 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3563 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3565 reinit_completion(&adapter->reset_done);
3566 adapter->wait_for_reset = true;
3567 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3572 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3580 adapter->wait_for_reset = false;
3585 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3587 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3589 adapter->desired.mtu = new_mtu + ETH_HLEN;
3591 return wait_for_reset(adapter);
3594 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3595 struct net_device *dev,
3596 netdev_features_t features)
3598 /* Some backing hardware adapters cannot
3599 * handle packets with an MSS less than 224
3600 * or with only one segment.
3602 if (skb_is_gso(skb)) {
3603 if (skb_shinfo(skb)->gso_size < 224 ||
3604 skb_shinfo(skb)->gso_segs == 1)
3605 features &= ~NETIF_F_GSO_MASK;
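/* Editorial example: a GSO skb with gso_size 200 would have its GSO
 * features masked here, causing the core stack to segment it in
 * software before handing the resulting skbs to ibmvnic_xmit().
 */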
3611 static const struct net_device_ops ibmvnic_netdev_ops = {
3612 .ndo_open = ibmvnic_open,
3613 .ndo_stop = ibmvnic_close,
3614 .ndo_start_xmit = ibmvnic_xmit,
3615 .ndo_set_rx_mode = ibmvnic_set_multi,
3616 .ndo_set_mac_address = ibmvnic_set_mac,
3617 .ndo_validate_addr = eth_validate_addr,
3618 .ndo_tx_timeout = ibmvnic_tx_timeout,
3619 .ndo_change_mtu = ibmvnic_change_mtu,
3620 .ndo_features_check = ibmvnic_features_check,
3623 /* ethtool functions */
3625 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3626 struct ethtool_link_ksettings *cmd)
3628 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3631 rc = send_query_phys_parms(adapter);
3633 adapter->speed = SPEED_UNKNOWN;
3634 adapter->duplex = DUPLEX_UNKNOWN;
3636 cmd->base.speed = adapter->speed;
3637 cmd->base.duplex = adapter->duplex;
3638 cmd->base.port = PORT_FIBRE;
3639 cmd->base.phy_address = 0;
3640 cmd->base.autoneg = AUTONEG_ENABLE;
3645 static void ibmvnic_get_drvinfo(struct net_device *netdev,
3646 struct ethtool_drvinfo *info)
3648 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3650 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3651 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3652 strscpy(info->fw_version, adapter->fw_version,
3653 sizeof(info->fw_version));
3656 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3658 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3660 return adapter->msg_enable;
3663 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3665 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3667 adapter->msg_enable = data;
3670 static u32 ibmvnic_get_link(struct net_device *netdev)
3672 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3674 /* Don't need to send a query because we request a logical link up at
3675 * init and then we wait for link state indications
3677 return adapter->logical_link_state;
3680 static void ibmvnic_get_ringparam(struct net_device *netdev,
3681 struct ethtool_ringparam *ring,
3682 struct kernel_ethtool_ringparam *kernel_ring,
3683 struct netlink_ext_ack *extack)
3685 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3687 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3688 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3689 ring->rx_mini_max_pending = 0;
3690 ring->rx_jumbo_max_pending = 0;
3691 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3692 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3693 ring->rx_mini_pending = 0;
3694 ring->rx_jumbo_pending = 0;
3697 static int ibmvnic_set_ringparam(struct net_device *netdev,
3698 struct ethtool_ringparam *ring,
3699 struct kernel_ethtool_ringparam *kernel_ring,
3700 struct netlink_ext_ack *extack)
3702 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3704 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
3705 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
3706 netdev_err(netdev, "Invalid request.\n");
3707 netdev_err(netdev, "Max tx buffers = %llu\n",
3708 adapter->max_rx_add_entries_per_subcrq);
3709 netdev_err(netdev, "Max rx buffers = %llu\n",
3710 adapter->max_tx_entries_per_subcrq);
3714 adapter->desired.rx_entries = ring->rx_pending;
3715 adapter->desired.tx_entries = ring->tx_pending;
3717 return wait_for_reset(adapter);
3720 static void ibmvnic_get_channels(struct net_device *netdev,
3721 struct ethtool_channels *channels)
3723 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3725 channels->max_rx = adapter->max_rx_queues;
3726 channels->max_tx = adapter->max_tx_queues;
3727 channels->max_other = 0;
3728 channels->max_combined = 0;
3729 channels->rx_count = adapter->req_rx_queues;
3730 channels->tx_count = adapter->req_tx_queues;
3731 channels->other_count = 0;
3732 channels->combined_count = 0;
3735 static int ibmvnic_set_channels(struct net_device *netdev,
3736 struct ethtool_channels *channels)
3738 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3740 adapter->desired.rx_queues = channels->rx_count;
3741 adapter->desired.tx_queues = channels->tx_count;
3743 return wait_for_reset(adapter);
3746 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3748 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3751 if (stringset != ETH_SS_STATS)
3754 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
3755 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3757 for (i = 0; i < adapter->req_tx_queues; i++) {
3758 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3759 data += ETH_GSTRING_LEN;
3761 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3762 data += ETH_GSTRING_LEN;
3764 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
3765 data += ETH_GSTRING_LEN;
3768 for (i = 0; i < adapter->req_rx_queues; i++) {
3769 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3770 data += ETH_GSTRING_LEN;
3772 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3773 data += ETH_GSTRING_LEN;
3775 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3776 data += ETH_GSTRING_LEN;
3780 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3782 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3786 return ARRAY_SIZE(ibmvnic_stats) +
3787 adapter->req_tx_queues * NUM_TX_STATS +
3788 adapter->req_rx_queues * NUM_RX_STATS;
3794 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3795 struct ethtool_stats *stats, u64 *data)
3797 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3798 union ibmvnic_crq crq;
3802 memset(&crq, 0, sizeof(crq));
3803 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3804 crq.request_statistics.cmd = REQUEST_STATISTICS;
3805 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3806 crq.request_statistics.len =
3807 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3809 /* Wait for data to be written */
3810 reinit_completion(&adapter->stats_done);
3811 rc = ibmvnic_send_crq(adapter, &crq);
3814 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3818 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3819 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3820 (adapter, ibmvnic_stats[i].offset));
3822 for (j = 0; j < adapter->req_tx_queues; j++) {
3823 data[i] = adapter->tx_stats_buffers[j].packets;
3825 data[i] = adapter->tx_stats_buffers[j].bytes;
3827 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3831 for (j = 0; j < adapter->req_rx_queues; j++) {
3832 data[i] = adapter->rx_stats_buffers[j].packets;
3834 data[i] = adapter->rx_stats_buffers[j].bytes;
3836 data[i] = adapter->rx_stats_buffers[j].interrupts;
3841 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3842 .get_drvinfo = ibmvnic_get_drvinfo,
3843 .get_msglevel = ibmvnic_get_msglevel,
3844 .set_msglevel = ibmvnic_set_msglevel,
3845 .get_link = ibmvnic_get_link,
3846 .get_ringparam = ibmvnic_get_ringparam,
3847 .set_ringparam = ibmvnic_set_ringparam,
3848 .get_channels = ibmvnic_get_channels,
3849 .set_channels = ibmvnic_set_channels,
3850 .get_strings = ibmvnic_get_strings,
3851 .get_sset_count = ibmvnic_get_sset_count,
3852 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3853 .get_link_ksettings = ibmvnic_get_link_ksettings,
3856 /* Routines for managing CRQs/sCRQs */
3858 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3859 struct ibmvnic_sub_crq_queue *scrq)
3864 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3869 free_irq(scrq->irq, scrq);
3870 irq_dispose_mapping(scrq->irq);
3875 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3876 atomic_set(&scrq->used, 0);
3878 scrq->ind_buf.index = 0;
3880 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3884 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3885 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3889 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3893 if (!adapter->tx_scrq || !adapter->rx_scrq)
3896 ibmvnic_clean_affinity(adapter);
3898 for (i = 0; i < adapter->req_tx_queues; i++) {
3899 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3900 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3905 for (i = 0; i < adapter->req_rx_queues; i++) {
3906 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3907 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3915 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3916 struct ibmvnic_sub_crq_queue *scrq,
3919 struct device *dev = &adapter->vdev->dev;
3922 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3925 /* Close the sub-crqs */
3927 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3928 adapter->vdev->unit_address,
3930 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3933 netdev_err(adapter->netdev,
3934 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3939 dma_free_coherent(dev,
3941 scrq->ind_buf.indir_arr,
3942 scrq->ind_buf.indir_dma);
3944 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3946 free_pages((unsigned long)scrq->msgs, 2);
3947 free_cpumask_var(scrq->affinity_mask);
3951 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3954 struct device *dev = &adapter->vdev->dev;
3955 struct ibmvnic_sub_crq_queue *scrq;
3958 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3963 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3965 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3966 goto zero_page_failed;
3968 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
3969 goto cpumask_alloc_failed;
3971 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3973 if (dma_mapping_error(dev, scrq->msg_token)) {
3974 dev_warn(dev, "Couldn't map crq queue messages page\n");
3978 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3979 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3981 if (rc == H_RESOURCE)
3982 rc = ibmvnic_reset_crq(adapter);
3984 if (rc == H_CLOSED) {
3985 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3987 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3991 scrq->adapter = adapter;
3992 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3993 scrq->ind_buf.index = 0;
3995 scrq->ind_buf.indir_arr =
3996 dma_alloc_coherent(dev,
3998 &scrq->ind_buf.indir_dma,
4001 if (!scrq->ind_buf.indir_arr)
4004 spin_lock_init(&scrq->lock);
4006 netdev_dbg(adapter->netdev,
4007 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
4008 scrq->crq_num, scrq->hw_irq, scrq->irq);
4014 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
4015 adapter->vdev->unit_address,
4017 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4019 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
4022 free_cpumask_var(scrq->affinity_mask);
4023 cpumask_alloc_failed:
4024 free_pages((unsigned long)scrq->msgs, 2);
4031 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
4035 ibmvnic_clean_affinity(adapter);
4036 if (adapter->tx_scrq) {
4037 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
4038 if (!adapter->tx_scrq[i])
4041 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
4043 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
4044 if (adapter->tx_scrq[i]->irq) {
4045 free_irq(adapter->tx_scrq[i]->irq,
4046 adapter->tx_scrq[i]);
4047 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
4048 adapter->tx_scrq[i]->irq = 0;
4051 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
4055 kfree(adapter->tx_scrq);
4056 adapter->tx_scrq = NULL;
4057 adapter->num_active_tx_scrqs = 0;
4060 if (adapter->rx_scrq) {
4061 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
4062 if (!adapter->rx_scrq[i])
4065 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
4067 if (adapter->rx_scrq[i]->irq) {
4068 free_irq(adapter->rx_scrq[i]->irq,
4069 adapter->rx_scrq[i]);
4070 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
4071 adapter->rx_scrq[i]->irq = 0;
4074 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
4078 kfree(adapter->rx_scrq);
4079 adapter->rx_scrq = NULL;
4080 adapter->num_active_rx_scrqs = 0;
4084 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
4085 struct ibmvnic_sub_crq_queue *scrq)
4087 struct device *dev = &adapter->vdev->dev;
4090 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4091 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4093 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
4098 /* We can not use the IRQ chip EOI handler because that has the
4099 * unintended effect of changing the interrupt priority.
4101 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
4103 u64 val = 0xff000000 | scrq->hw_irq;
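/* Editorial note: in the XIRR value passed to H_EOI, the high byte
 * (0xff, the least-favored priority) is the CPPR and the remaining
 * bytes identify the interrupt source (scrq->hw_irq).
 */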
4106 rc = plpar_hcall_norets(H_EOI, val);
4108 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
4111 /* Due to a firmware bug, the hypervisor can send an interrupt to a
4112 * transmit or receive queue just prior to a partition migration.
4113 * Force an EOI after migration.
4115 static void ibmvnic_clear_pending_interrupt(struct device *dev,
4116 struct ibmvnic_sub_crq_queue *scrq)
4118 if (!xive_enabled())
4119 ibmvnic_xics_eoi(dev, scrq);
4122 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
4123 struct ibmvnic_sub_crq_queue *scrq)
4125 struct device *dev = &adapter->vdev->dev;
4128 if (scrq->hw_irq > 0x100000000ULL) {
4129 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
4133 if (test_bit(0, &adapter->resetting) &&
4134 adapter->reset_reason == VNIC_RESET_MOBILITY) {
4135 ibmvnic_clear_pending_interrupt(dev, scrq);
4138 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4139 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4141 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
4146 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
4147 struct ibmvnic_sub_crq_queue *scrq)
4149 struct device *dev = &adapter->vdev->dev;
4150 struct ibmvnic_tx_pool *tx_pool;
4151 struct ibmvnic_tx_buff *txbuff;
4152 struct netdev_queue *txq;
4153 union sub_crq *next;
4158 while (pending_scrq(adapter, scrq)) {
4159 unsigned int pool = scrq->pool_index;
4160 int num_entries = 0;
4161 int total_bytes = 0;
4162 int num_packets = 0;
4164 next = ibmvnic_next_scrq(adapter, scrq);
4165 for (i = 0; i < next->tx_comp.num_comps; i++) {
4166 index = be32_to_cpu(next->tx_comp.correlators[i]);
4167 if (index & IBMVNIC_TSO_POOL_MASK) {
4168 tx_pool = &adapter->tso_pool[pool];
4169 index &= ~IBMVNIC_TSO_POOL_MASK;
4171 tx_pool = &adapter->tx_pool[pool];
4174 txbuff = &tx_pool->tx_buff[index];
4176 num_entries += txbuff->num_entries;
4178 total_bytes += txbuff->skb->len;
4179 if (next->tx_comp.rcs[i]) {
4180 dev_err(dev, "tx error %x\n",
4181 next->tx_comp.rcs[i]);
4182 dev_kfree_skb_irq(txbuff->skb);
4184 dev_consume_skb_irq(txbuff->skb);
4188 netdev_warn(adapter->netdev,
4189 "TX completion received with NULL socket buffer\n");
4191 tx_pool->free_map[tx_pool->producer_index] = index;
4192 tx_pool->producer_index =
4193 (tx_pool->producer_index + 1) %
4194 tx_pool->num_buffers;
4196 /* remove tx_comp scrq */
4197 next->tx_comp.first = 0;
4199 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
4200 netdev_tx_completed_queue(txq, num_packets, total_bytes);
4202 if (atomic_sub_return(num_entries, &scrq->used) <=
4203 (adapter->req_tx_entries_per_subcrq / 2) &&
4204 __netif_subqueue_stopped(adapter->netdev,
4205 scrq->pool_index)) {
4207 if (adapter->tx_queues_active) {
4208 netif_wake_subqueue(adapter->netdev,
4210 netdev_dbg(adapter->netdev,
4211 "Started queue %d\n",
4218 enable_scrq_irq(adapter, scrq);
4220 if (pending_scrq(adapter, scrq)) {
4221 disable_scrq_irq(adapter, scrq);
4228 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
4230 struct ibmvnic_sub_crq_queue *scrq = instance;
4231 struct ibmvnic_adapter *adapter = scrq->adapter;
4233 disable_scrq_irq(adapter, scrq);
4234 ibmvnic_complete_tx(adapter, scrq);
4239 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
4241 struct ibmvnic_sub_crq_queue *scrq = instance;
4242 struct ibmvnic_adapter *adapter = scrq->adapter;
4244 /* When booting a kdump kernel we can hit pending interrupts
4245 * prior to completing driver initialization.
4247 if (unlikely(adapter->state != VNIC_OPEN))
4250 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
4252 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
4253 disable_scrq_irq(adapter, scrq);
4254 __napi_schedule(&adapter->napi[scrq->scrq_num]);
4260 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
4262 struct device *dev = &adapter->vdev->dev;
4263 struct ibmvnic_sub_crq_queue *scrq;
4267 for (i = 0; i < adapter->req_tx_queues; i++) {
4268 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
4270 scrq = adapter->tx_scrq[i];
4271 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4275 dev_err(dev, "Error mapping irq\n");
4276 goto req_tx_irq_failed;
4279 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
4280 adapter->vdev->unit_address, i);
4281 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
4282 0, scrq->name, scrq);
4285 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
4287 irq_dispose_mapping(scrq->irq);
4288 goto req_tx_irq_failed;
4292 for (i = 0; i < adapter->req_rx_queues; i++) {
4293 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4295 scrq = adapter->rx_scrq[i];
4296 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4299 dev_err(dev, "Error mapping irq\n");
4300 goto req_rx_irq_failed;
4302 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
4303 adapter->vdev->unit_address, i);
4304 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
4305 0, scrq->name, scrq);
4307 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
4309 irq_dispose_mapping(scrq->irq);
4310 goto req_rx_irq_failed;
4315 ibmvnic_set_affinity(adapter);
4321 for (j = 0; j < i; j++) {
4322 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4323 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4325 i = adapter->req_tx_queues;
4327 for (j = 0; j < i; j++) {
4328 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4329 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4331 release_sub_crqs(adapter, 1);
4335 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4337 struct device *dev = &adapter->vdev->dev;
4338 struct ibmvnic_sub_crq_queue **allqueues;
4339 int registered_queues = 0;
4344 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4346 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
4350 for (i = 0; i < total_queues; i++) {
4351 allqueues[i] = init_sub_crq_queue(adapter);
4352 if (!allqueues[i]) {
4353 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
4356 registered_queues++;
4359 /* Make sure we were able to register the minimum number of queues */
4360 if (registered_queues <
4361 adapter->min_tx_queues + adapter->min_rx_queues) {
4362 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
4366 /* Distribute the allocation shortfall across the requested queue counts */
4367 for (i = 0; i < total_queues - registered_queues + more; i++) {
4368 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4371 if (adapter->req_rx_queues > adapter->min_rx_queues)
4372 adapter->req_rx_queues--;
4377 if (adapter->req_tx_queues > adapter->min_tx_queues)
4378 adapter->req_tx_queues--;
4385 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4386 sizeof(*adapter->tx_scrq), GFP_KERNEL);
4387 if (!adapter->tx_scrq)
4390 for (i = 0; i < adapter->req_tx_queues; i++) {
4391 adapter->tx_scrq[i] = allqueues[i];
4392 adapter->tx_scrq[i]->pool_index = i;
4393 adapter->num_active_tx_scrqs++;
4396 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4397 sizeof(*adapter->rx_scrq), GFP_KERNEL);
4398 if (!adapter->rx_scrq)
4401 for (i = 0; i < adapter->req_rx_queues; i++) {
4402 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4403 adapter->rx_scrq[i]->scrq_num = i;
4404 adapter->num_active_rx_scrqs++;
4411 kfree(adapter->tx_scrq);
4412 adapter->tx_scrq = NULL;
4414 for (i = 0; i < registered_queues; i++)
4415 release_sub_crq_queue(adapter, allqueues[i], 1);
4420 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4422 struct device *dev = &adapter->vdev->dev;
4423 union ibmvnic_crq crq;
4427 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4428 * the PROMISC flag). Initialize this count upfront. When the tasklet
4429 * receives a response to all of these, it will send the next protocol
4430 * message (QUERY_IP_OFFLOAD).
4431 */
4432 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4433 adapter->promisc_supported)
4434 cap_reqs = 7;
4435 else
4436 cap_reqs = 6;
4438 if (!retry) {
4439 /* Sub-CRQ entries are 32 bytes long */
4440 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
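/* Worked example, assuming 4 KiB pages: one sub-CRQ descriptor is
 * sizeof(u64) * 4 = 32 bytes and a queue spans four pages, so
 * entries_page = (4 * 4096) / 32 = 512 descriptors. The check below
 * rejects a server whose advertised minimum entries per sub-CRQ could
 * never fit in a single queue.
 */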
4442 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4444 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4445 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4446 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4450 if (adapter->desired.mtu)
4451 adapter->req_mtu = adapter->desired.mtu;
4452 else
4453 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4455 if (!adapter->desired.tx_entries)
4456 adapter->desired.tx_entries =
4457 adapter->max_tx_entries_per_subcrq;
4458 if (!adapter->desired.rx_entries)
4459 adapter->desired.rx_entries =
4460 adapter->max_rx_add_entries_per_subcrq;
4462 max_entries = IBMVNIC_LTB_SET_SIZE /
4463 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
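/* Worked example (constants are those in ibmvnic.h at the time of
 * writing and may differ): with IBMVNIC_LTB_SET_SIZE = 38 << 20 and
 * IBMVNIC_BUFFER_HLEN = 500, a req_mtu of 9000 gives
 * max_entries = (38 << 20) / 9500 ~= 4194, so desired.tx_entries and
 * desired.rx_entries are clamped below whenever the requested ring
 * would not fit inside one long-term-buffer set.
 */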
4465 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4466 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4467 adapter->desired.tx_entries = max_entries;
4470 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4471 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4472 adapter->desired.rx_entries = max_entries;
4475 if (adapter->desired.tx_entries)
4476 adapter->req_tx_entries_per_subcrq =
4477 adapter->desired.tx_entries;
4478 else
4479 adapter->req_tx_entries_per_subcrq =
4480 adapter->max_tx_entries_per_subcrq;
4482 if (adapter->desired.rx_entries)
4483 adapter->req_rx_add_entries_per_subcrq =
4484 adapter->desired.rx_entries;
4485 else
4486 adapter->req_rx_add_entries_per_subcrq =
4487 adapter->max_rx_add_entries_per_subcrq;
4489 if (adapter->desired.tx_queues)
4490 adapter->req_tx_queues =
4491 adapter->desired.tx_queues;
4492 else
4493 adapter->req_tx_queues =
4494 adapter->opt_tx_comp_sub_queues;
4496 if (adapter->desired.rx_queues)
4497 adapter->req_rx_queues =
4498 adapter->desired.rx_queues;
4499 else
4500 adapter->req_rx_queues =
4501 adapter->opt_rx_comp_queues;
4503 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4504 } else {
4505 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4506 }
4507 memset(&crq, 0, sizeof(crq));
4508 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4509 crq.request_capability.cmd = REQUEST_CAPABILITY;
4511 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4512 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4514 ibmvnic_send_crq(adapter, &crq);
4516 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4517 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4519 ibmvnic_send_crq(adapter, &crq);
4521 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4522 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4524 ibmvnic_send_crq(adapter, &crq);
4526 crq.request_capability.capability =
4527 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4528 crq.request_capability.number =
4529 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4531 ibmvnic_send_crq(adapter, &crq);
4533 crq.request_capability.capability =
4534 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4535 crq.request_capability.number =
4536 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4538 ibmvnic_send_crq(adapter, &crq);
4540 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4541 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4543 ibmvnic_send_crq(adapter, &crq);
4545 if (adapter->netdev->flags & IFF_PROMISC) {
4546 if (adapter->promisc_supported) {
4547 crq.request_capability.capability =
4548 cpu_to_be16(PROMISC_REQUESTED);
4549 crq.request_capability.number = cpu_to_be64(1);
4551 ibmvnic_send_crq(adapter, &crq);
4554 crq.request_capability.capability =
4555 cpu_to_be16(PROMISC_REQUESTED);
4556 crq.request_capability.number = cpu_to_be64(0);
4558 ibmvnic_send_crq(adapter, &crq);
4561 /* Keep at end to catch any discrepancy between expected and actual
4562 * CRQs sent.
4563 */
4564 WARN_ON(cap_reqs != 0);
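/* Accounting contract for the WARN_ON above: in the full upstream source
 * each ibmvnic_send_crq(adapter, &crq) in this function is paired with a
 * cap_reqs-- immediately after it, so the counter reaches exactly zero
 * once the 6th (or, with PROMISC support, 7th) request has gone out. A
 * non-zero value here means the number of CRQs actually sent diverged
 * from the count primed into running_cap_crqs, which would strand the
 * tasklet's response counting.
 */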
4567 static int pending_scrq(struct ibmvnic_adapter *adapter,
4568 struct ibmvnic_sub_crq_queue *scrq)
4570 union sub_crq *entry = &scrq->msgs[scrq->cur];
4571 int rc;
4573 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4575 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4576 * contents of the SCRQ descriptor
4577 */
4578 dma_rmb();
4580 return rc;
4581 }
4583 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4584 struct ibmvnic_sub_crq_queue *scrq)
4586 union sub_crq *entry;
4587 unsigned long flags;
4589 spin_lock_irqsave(&scrq->lock, flags);
4590 entry = &scrq->msgs[scrq->cur];
4591 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4592 if (++scrq->cur == scrq->size)
4593 scrq->cur = 0;
4594 } else {
4595 entry = NULL;
4596 }
4597 spin_unlock_irqrestore(&scrq->lock, flags);
4599 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4600 * contents of the SCRQ descriptor
4601 */
4602 dma_rmb();
4604 return entry;
4605 }
4607 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4609 struct ibmvnic_crq_queue *queue = &adapter->crq;
4610 union ibmvnic_crq *crq;
4612 crq = &queue->msgs[queue->cur];
4613 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4614 if (++queue->cur == queue->size)
4615 queue->cur = 0;
4616 } else {
4617 crq = NULL;
4618 }
4620 return crq;
4621 }
4623 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4625 switch (rc) {
4626 case H_PARAMETER:
4627 dev_warn_ratelimited(dev,
4628 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4629 func, rc);
4630 break;
4631 case H_CLOSED:
4632 dev_warn_ratelimited(dev,
4633 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4634 func, rc);
4635 break;
4636 default:
4637 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4638 }
4639 }
4642 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4643 u64 remote_handle, u64 ioba, u64 num_entries)
4645 unsigned int ua = adapter->vdev->unit_address;
4646 struct device *dev = &adapter->vdev->dev;
4649 /* Make sure the hypervisor sees the complete request */
4650 dma_wmb();
4651 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4652 cpu_to_be64(remote_handle),
4653 ioba, num_entries);
4655 if (rc)
4656 print_subcrq_error(dev, rc, __func__);
4658 return rc;
4659 }
4661 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4662 union ibmvnic_crq *crq)
4664 unsigned int ua = adapter->vdev->unit_address;
4665 struct device *dev = &adapter->vdev->dev;
4666 u64 *u64_crq = (u64 *)crq;
4669 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4670 (unsigned long)cpu_to_be64(u64_crq[0]),
4671 (unsigned long)cpu_to_be64(u64_crq[1]));
4673 if (!adapter->crq.active &&
4674 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4675 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4679 /* Make sure the hypervisor sees the complete request */
4680 dma_wmb();
4682 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4683 cpu_to_be64(u64_crq[0]),
4684 cpu_to_be64(u64_crq[1]));
4686 if (rc) {
4687 if (rc == H_CLOSED) {
4688 dev_warn(dev, "CRQ Queue closed\n");
4689 /* do not reset, report the fail, wait for passive init from server */
4692 dev_warn(dev, "Send error (rc=%d)\n", rc);
4698 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4700 struct device *dev = &adapter->vdev->dev;
4701 union ibmvnic_crq crq;
4705 memset(&crq, 0, sizeof(crq));
4706 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4707 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4708 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4710 do {
4711 rc = ibmvnic_send_crq(adapter, &crq);
4712 if (rc != -EAGAIN)
4713 break;
4714 retries--;
4715 msleep(50);
4717 } while (retries > 0);
4720 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4727 struct vnic_login_client_data {
4733 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4735 int len;
4737 /* Calculate the amount of buffer space needed for the
4738 * vnic client data in the login buffer. There are four entries,
4739 * OS name, LPAR name, device name, and a null last entry.
4741 len = 4 * sizeof(struct vnic_login_client_data);
4742 len += 6; /* "Linux" plus NUL terminator */
4743 len += strlen(utsname()->nodename) + 1;
4744 len += strlen(adapter->netdev->name) + 1;
4746 return len;
4747 }
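/* Worked example for vnic_client_data_len() above (hypothetical host):
 * with nodename "lpar1" and netdev "eth0",
 *
 *	len = 4 * sizeof(struct vnic_login_client_data)
 *	    + 6   ("Linux" + NUL)
 *	    + 6   ("lpar1" + NUL)
 *	    + 5   ("eth0"  + NUL)
 *
 * i.e. three populated type/len/name descriptors plus the zeroed
 * terminator entry accounted for by the factor of four.
 */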
4749 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4750 struct vnic_login_client_data *vlcd)
4752 const char *os_name = "Linux";
4755 /* Type 1 - LPAR OS */
4756 vlcd->type = 1;
4757 len = strlen(os_name) + 1;
4758 vlcd->len = cpu_to_be16(len);
4759 strscpy(vlcd->name, os_name, len);
4760 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4762 /* Type 2 - LPAR name */
4763 vlcd->type = 2;
4764 len = strlen(utsname()->nodename) + 1;
4765 vlcd->len = cpu_to_be16(len);
4766 strscpy(vlcd->name, utsname()->nodename, len);
4767 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4769 /* Type 3 - device name */
4770 vlcd->type = 3;
4771 len = strlen(adapter->netdev->name) + 1;
4772 vlcd->len = cpu_to_be16(len);
4773 strscpy(vlcd->name, adapter->netdev->name, len);
4776 static int send_login(struct ibmvnic_adapter *adapter)
4778 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4779 struct ibmvnic_login_buffer *login_buffer;
4780 struct device *dev = &adapter->vdev->dev;
4781 struct vnic_login_client_data *vlcd;
4782 dma_addr_t rsp_buffer_token;
4783 dma_addr_t buffer_token;
4784 size_t rsp_buffer_size;
4785 union ibmvnic_crq crq;
4786 int client_data_len;
4787 size_t buffer_size;
4788 __be64 *tx_list_p;
4789 __be64 *rx_list_p;
4790 int rc;
4791 int i;
4793 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4794 netdev_err(adapter->netdev,
4795 "RX or TX queues are not allocated, device login failed\n");
4799 release_login_buffer(adapter);
4800 release_login_rsp_buffer(adapter);
4802 client_data_len = vnic_client_data_len(adapter);
4804 buffer_size =
4805 sizeof(struct ibmvnic_login_buffer) +
4806 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4807 client_data_len;
4809 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4810 if (!login_buffer)
4811 goto buf_alloc_failed;
4813 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4814 DMA_TO_DEVICE);
4815 if (dma_mapping_error(dev, buffer_token)) {
4816 dev_err(dev, "Couldn't map login buffer\n");
4817 goto buf_map_failed;
4820 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4821 sizeof(u64) * adapter->req_tx_queues +
4822 sizeof(u64) * adapter->req_rx_queues +
4823 sizeof(u64) * adapter->req_rx_queues +
4824 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4826 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4827 if (!login_rsp_buffer)
4828 goto buf_rsp_alloc_failed;
4830 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4831 rsp_buffer_size, DMA_FROM_DEVICE);
4832 if (dma_mapping_error(dev, rsp_buffer_token)) {
4833 dev_err(dev, "Couldn't map login rsp buffer\n");
4834 goto buf_rsp_map_failed;
4837 adapter->login_buf = login_buffer;
4838 adapter->login_buf_token = buffer_token;
4839 adapter->login_buf_sz = buffer_size;
4840 adapter->login_rsp_buf = login_rsp_buffer;
4841 adapter->login_rsp_buf_token = rsp_buffer_token;
4842 adapter->login_rsp_buf_sz = rsp_buffer_size;
4844 login_buffer->len = cpu_to_be32(buffer_size);
4845 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4846 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4847 login_buffer->off_txcomp_subcrqs =
4848 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4849 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4850 login_buffer->off_rxcomp_subcrqs =
4851 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4852 sizeof(u64) * adapter->req_tx_queues);
4853 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4854 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4856 tx_list_p = (__be64 *)((char *)login_buffer +
4857 sizeof(struct ibmvnic_login_buffer));
4858 rx_list_p = (__be64 *)((char *)login_buffer +
4859 sizeof(struct ibmvnic_login_buffer) +
4860 sizeof(u64) * adapter->req_tx_queues);
4862 for (i = 0; i < adapter->req_tx_queues; i++) {
4863 if (adapter->tx_scrq[i]) {
4864 tx_list_p[i] =
4865 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4869 for (i = 0; i < adapter->req_rx_queues; i++) {
4870 if (adapter->rx_scrq[i]) {
4871 rx_list_p[i] =
4872 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4876 /* Insert vNIC login client data */
4877 vlcd = (struct vnic_login_client_data *)
4878 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4879 login_buffer->client_data_offset =
4880 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4881 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4883 vnic_add_client_data(adapter, vlcd);
4885 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4886 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4887 netdev_dbg(adapter->netdev, "%016lx\n",
4888 ((unsigned long *)(adapter->login_buf))[i]);
4891 memset(&crq, 0, sizeof(crq));
4892 crq.login.first = IBMVNIC_CRQ_CMD;
4893 crq.login.cmd = LOGIN;
4894 crq.login.ioba = cpu_to_be32(buffer_token);
4895 crq.login.len = cpu_to_be32(buffer_size);
4897 adapter->login_pending = true;
4898 rc = ibmvnic_send_crq(adapter, &crq);
4900 adapter->login_pending = false;
4901 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4902 goto buf_send_failed;
4904 return 0;
4906 buf_send_failed:
4908 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
4909 DMA_FROM_DEVICE);
4910 buf_rsp_map_failed:
4911 kfree(login_rsp_buffer);
4912 adapter->login_rsp_buf = NULL;
4913 buf_rsp_alloc_failed:
4914 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4915 buf_map_failed:
4916 kfree(login_buffer);
4917 adapter->login_buf = NULL;
4918 buf_alloc_failed:
4919 return -ENOMEM;
4920 }
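/* Layout of the login buffer built above, expressed as byte offsets from
 * login_buffer (these are exactly the off_* fields written into the
 * header):
 *
 *   0                      struct ibmvnic_login_buffer header
 *   sizeof(header)         tx sub-CRQ numbers, req_tx_queues * u64
 *   + 8 * req_tx_queues    rx sub-CRQ numbers, req_rx_queues * u64
 *   + 8 * req_rx_queues    vnic_login_client_data records
 *
 * client_data_offset/client_data_len in the header point the server at
 * the trailing records, which is where vlcd is aimed before
 * vnic_add_client_data() fills them in.
 */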
4922 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4925 union ibmvnic_crq crq;
4927 memset(&crq, 0, sizeof(crq));
4928 crq.request_map.first = IBMVNIC_CRQ_CMD;
4929 crq.request_map.cmd = REQUEST_MAP;
4930 crq.request_map.map_id = map_id;
4931 crq.request_map.ioba = cpu_to_be32(addr);
4932 crq.request_map.len = cpu_to_be32(len);
4933 return ibmvnic_send_crq(adapter, &crq);
4936 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4938 union ibmvnic_crq crq;
4940 memset(&crq, 0, sizeof(crq));
4941 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4942 crq.request_unmap.cmd = REQUEST_UNMAP;
4943 crq.request_unmap.map_id = map_id;
4944 return ibmvnic_send_crq(adapter, &crq);
4947 static void send_query_map(struct ibmvnic_adapter *adapter)
4949 union ibmvnic_crq crq;
4951 memset(&crq, 0, sizeof(crq));
4952 crq.query_map.first = IBMVNIC_CRQ_CMD;
4953 crq.query_map.cmd = QUERY_MAP;
4954 ibmvnic_send_crq(adapter, &crq);
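/* A minimal sketch of how the three helpers above are used together by
 * the long-term-buffer code elsewhere in this file (simplified; the
 * bitmap bound and ltb field names are assumptions and may differ from
 * ibmvnic.h):
 *
 *	ltb->map_id = find_first_zero_bit(adapter->map_ids, 256);
 *	bitmap_set(adapter->map_ids, ltb->map_id, 1);
 *	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 *	// wait on adapter->fw_done; REQUEST_MAP_RSP sets fw_done_rc
 *	...
 *	send_request_unmap(adapter, ltb->map_id);  // on teardown
 *
 * send_query_map() is fire-and-forget: handle_query_map_rsp() below only
 * logs the server's page accounting.
 */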
4957 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4958 static void send_query_cap(struct ibmvnic_adapter *adapter)
4960 union ibmvnic_crq crq;
4961 int cap_reqs;
4963 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
4964 * upfront. When the tasklet receives a response to all of these, it
4965 * can send out the next protocol message (REQUEST_CAPABILITY).
4966 */
4967 cap_reqs = 25;
4969 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4971 memset(&crq, 0, sizeof(crq));
4972 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4973 crq.query_capability.cmd = QUERY_CAPABILITY;
4975 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4976 ibmvnic_send_crq(adapter, &crq);
4979 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4980 ibmvnic_send_crq(adapter, &crq);
4983 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4984 ibmvnic_send_crq(adapter, &crq);
4987 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4988 ibmvnic_send_crq(adapter, &crq);
4991 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4992 ibmvnic_send_crq(adapter, &crq);
4995 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4996 ibmvnic_send_crq(adapter, &crq);
4999 crq.query_capability.capability =
5000 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
5001 ibmvnic_send_crq(adapter, &crq);
5004 crq.query_capability.capability =
5005 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
5006 ibmvnic_send_crq(adapter, &crq);
5009 crq.query_capability.capability =
5010 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
5011 ibmvnic_send_crq(adapter, &crq);
5014 crq.query_capability.capability =
5015 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
5016 ibmvnic_send_crq(adapter, &crq);
5019 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
5020 ibmvnic_send_crq(adapter, &crq);
5023 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
5024 ibmvnic_send_crq(adapter, &crq);
5027 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
5028 ibmvnic_send_crq(adapter, &crq);
5031 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
5032 ibmvnic_send_crq(adapter, &crq);
5035 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
5036 ibmvnic_send_crq(adapter, &crq);
5039 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
5040 ibmvnic_send_crq(adapter, &crq);
5043 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
5044 ibmvnic_send_crq(adapter, &crq);
5047 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
5048 ibmvnic_send_crq(adapter, &crq);
5051 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
5052 ibmvnic_send_crq(adapter, &crq);
5055 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
5056 ibmvnic_send_crq(adapter, &crq);
5059 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
5060 ibmvnic_send_crq(adapter, &crq);
5063 crq.query_capability.capability =
5064 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
5065 ibmvnic_send_crq(adapter, &crq);
5068 crq.query_capability.capability =
5069 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
5070 ibmvnic_send_crq(adapter, &crq);
5073 crq.query_capability.capability =
5074 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
5075 ibmvnic_send_crq(adapter, &crq);
5078 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
5080 ibmvnic_send_crq(adapter, &crq);
5083 /* Keep at end to catch any discrepancy between expected and actual
5084 * CRQs sent.
5085 */
5086 WARN_ON(cap_reqs != 0);
5089 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
5091 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
5092 struct device *dev = &adapter->vdev->dev;
5093 union ibmvnic_crq crq;
5095 adapter->ip_offload_tok =
5096 dma_map_single(dev,
5097 &adapter->ip_offload_buf,
5098 buf_sz,
5099 DMA_FROM_DEVICE);
5101 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
5102 if (!firmware_has_feature(FW_FEATURE_CMO))
5103 dev_err(dev, "Couldn't map offload buffer\n");
5107 memset(&crq, 0, sizeof(crq));
5108 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
5109 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
5110 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
5111 crq.query_ip_offload.ioba =
5112 cpu_to_be32(adapter->ip_offload_tok);
5114 ibmvnic_send_crq(adapter, &crq);
5117 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
5119 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
5120 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5121 struct device *dev = &adapter->vdev->dev;
5122 netdev_features_t old_hw_features = 0;
5123 union ibmvnic_crq crq;
5125 adapter->ip_offload_ctrl_tok =
5126 dma_map_single(dev,
5127 &adapter->ip_offload_ctrl,
5128 sizeof(adapter->ip_offload_ctrl),
5129 DMA_TO_DEVICE);
5131 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
5132 dev_err(dev, "Couldn't map ip offload control buffer\n");
5136 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5137 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
5138 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
5139 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
5140 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
5141 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
5142 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
5143 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
5144 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
5145 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
5147 /* large_rx disabled for now, additional features needed */
5148 ctrl_buf->large_rx_ipv4 = 0;
5149 ctrl_buf->large_rx_ipv6 = 0;
5151 if (adapter->state != VNIC_PROBING) {
5152 old_hw_features = adapter->netdev->hw_features;
5153 adapter->netdev->hw_features = 0;
5156 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
5158 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
5159 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
5161 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
5162 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
5164 if ((adapter->netdev->features &
5165 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
5166 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
5168 if (buf->large_tx_ipv4)
5169 adapter->netdev->hw_features |= NETIF_F_TSO;
5170 if (buf->large_tx_ipv6)
5171 adapter->netdev->hw_features |= NETIF_F_TSO6;
5173 if (adapter->state == VNIC_PROBING) {
5174 adapter->netdev->features |= adapter->netdev->hw_features;
5175 } else if (old_hw_features != adapter->netdev->hw_features) {
5176 netdev_features_t tmp = 0;
5178 /* disable features no longer supported */
5179 adapter->netdev->features &= adapter->netdev->hw_features;
5180 /* turn on features now supported if previously enabled */
5181 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
5182 adapter->netdev->hw_features;
5183 adapter->netdev->features |=
5184 tmp & adapter->netdev->wanted_features;
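/* Worked example of the fix-up above: suppose a reset drops TSO support
 * (old_hw_features had NETIF_F_TSO, the new hw_features does not) while
 * adding NETIF_F_TSO6. Then
 *
 *	features &= hw_features;          // TSO is force-disabled
 *	tmp = (old ^ new) & new;          // only the newly available TSO6
 *	features |= tmp & wanted_features;
 *
 * so a feature is re-enabled only if the hardware now offers it AND the
 * administrator had previously asked for it (ethtool -K), never
 * unconditionally.
 */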
5187 memset(&crq, 0, sizeof(crq));
5188 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
5189 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
5190 crq.control_ip_offload.len =
5191 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5192 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
5193 ibmvnic_send_crq(adapter, &crq);
5196 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
5197 struct ibmvnic_adapter *adapter)
5199 struct device *dev = &adapter->vdev->dev;
5201 if (crq->get_vpd_size_rsp.rc.code) {
5202 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
5203 crq->get_vpd_size_rsp.rc.code);
5204 complete(&adapter->fw_done);
5205 return;
5206 }
5208 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
5209 complete(&adapter->fw_done);
5212 static void handle_vpd_rsp(union ibmvnic_crq *crq,
5213 struct ibmvnic_adapter *adapter)
5215 struct device *dev = &adapter->vdev->dev;
5216 unsigned char *substr = NULL;
5217 u8 fw_level_len = 0;
5219 memset(adapter->fw_version, 0, 32);
5221 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
5224 if (crq->get_vpd_rsp.rc.code) {
5225 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
5226 crq->get_vpd_rsp.rc.code);
5227 goto complete;
5228 }
5230 /* get the position of the firmware version info
5231 * located after the ASCII 'RM' substring in the buffer
5233 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
5235 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
5239 /* get length of firmware level ASCII substring */
5240 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
5241 fw_level_len = *(substr + 2);
5243 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
5247 /* copy firmware version string from vpd into adapter */
5248 if ((substr + 3 + fw_level_len) <
5249 (adapter->vpd->buff + adapter->vpd->len)) {
5250 strscpy(adapter->fw_version, substr + 3,
5251 sizeof(adapter->fw_version));
5253 dev_info(dev, "FW substr extrapolated VPD buff\n");
5256 complete:
5257 if (adapter->fw_version[0] == '\0')
5258 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
5259 complete(&adapter->fw_done);
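/* Parsing example (hypothetical VPD contents): for a buffer containing
 * ... 'R' 'M' 0x06 '1' '9' '.' '0' '.' '1' ..., strnstr() finds "RM",
 * fw_level_len = *(substr + 2) = 6, and the six bytes starting at
 * substr + 3 yield fw_version = "19.0.1". Both range checks above guard
 * against a length byte or version string that would run past vpd->len.
 */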
5262 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
5264 struct device *dev = &adapter->vdev->dev;
5265 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5266 int i;
5268 dma_unmap_single(dev, adapter->ip_offload_tok,
5269 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
5271 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
5272 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
5273 netdev_dbg(adapter->netdev, "%016lx\n",
5274 ((unsigned long *)(buf))[i]);
5276 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
5277 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
5278 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
5279 buf->tcp_ipv4_chksum);
5280 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
5281 buf->tcp_ipv6_chksum);
5282 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
5283 buf->udp_ipv4_chksum);
5284 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5285 buf->udp_ipv6_chksum);
5286 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5287 buf->large_tx_ipv4);
5288 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5289 buf->large_tx_ipv6);
5290 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5291 buf->large_rx_ipv4);
5292 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5293 buf->large_rx_ipv6);
5294 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5295 buf->max_ipv4_header_size);
5296 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5297 buf->max_ipv6_header_size);
5298 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5299 buf->max_tcp_header_size);
5300 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5301 buf->max_udp_header_size);
5302 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5303 buf->max_large_tx_size);
5304 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5305 buf->max_large_rx_size);
5306 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5307 buf->ipv6_extension_header);
5308 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5309 buf->tcp_pseudosum_req);
5310 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5311 buf->num_ipv6_ext_headers);
5312 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5313 buf->off_ipv6_ext_headers);
5315 send_control_ip_offload(adapter);
5318 static const char *ibmvnic_fw_err_cause(u16 cause)
5321 case ADAPTER_PROBLEM:
5322 return "adapter problem";
5323 case BUS_PROBLEM:
5324 return "bus problem";
5325 case FIRMWARE_PROBLEM:
5326 return "firmware problem";
5327 case DEVICE_PROBLEM:
5328 return "device driver problem";
5329 case EEH_RECOVERY:
5330 return "EEH recovery";
5331 case FIRMWARE_UPDATED:
5332 return "firmware updated";
5333 case LOW_MEMORY:
5334 return "low memory";
5335 default:
5336 return "unknown";
5337 }
5338 }
5340 static void handle_error_indication(union ibmvnic_crq *crq,
5341 struct ibmvnic_adapter *adapter)
5343 struct device *dev = &adapter->vdev->dev;
5344 u16 cause;
5346 cause = be16_to_cpu(crq->error_indication.error_cause);
5348 dev_warn_ratelimited(dev,
5349 "Firmware reports %serror, cause: %s. Starting recovery...\n",
5350 crq->error_indication.flags
5351 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5352 ibmvnic_fw_err_cause(cause));
5354 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5355 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5357 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5360 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5361 struct ibmvnic_adapter *adapter)
5363 struct net_device *netdev = adapter->netdev;
5364 struct device *dev = &adapter->vdev->dev;
5365 long rc;
5367 rc = crq->change_mac_addr_rsp.rc.code;
5369 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5372 /* crq->change_mac_addr.mac_addr is the requested one
5373 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5375 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5376 ether_addr_copy(adapter->mac_addr,
5377 &crq->change_mac_addr_rsp.mac_addr[0]);
5378 out:
5379 complete(&adapter->fw_done);
5380 return rc;
5381 }
5383 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
5384 struct ibmvnic_adapter *adapter)
5386 struct device *dev = &adapter->vdev->dev;
5387 u64 *req_value;
5388 char *name;
5390 atomic_dec(&adapter->running_cap_crqs);
5391 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5392 atomic_read(&adapter->running_cap_crqs));
5393 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
5395 req_value = &adapter->req_tx_queues;
5399 req_value = &adapter->req_rx_queues;
5402 case REQ_RX_ADD_QUEUES:
5403 req_value = &adapter->req_rx_add_queues;
5406 case REQ_TX_ENTRIES_PER_SUBCRQ:
5407 req_value = &adapter->req_tx_entries_per_subcrq;
5408 name = "tx_entries_per_subcrq";
5410 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
5411 req_value = &adapter->req_rx_add_entries_per_subcrq;
5412 name = "rx_add_entries_per_subcrq";
5415 req_value = &adapter->req_mtu;
5418 case PROMISC_REQUESTED:
5419 req_value = &adapter->promisc;
5423 dev_err(dev, "Got invalid cap request rsp %d\n",
5424 crq->request_capability.capability);
5428 switch (crq->request_capability_rsp.rc.code) {
5431 case PARTIALSUCCESS:
5432 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
5434 (long)be64_to_cpu(crq->request_capability_rsp.number),
5437 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
5439 pr_err("mtu of %llu is not supported. Reverting.\n",
5441 *req_value = adapter->fallback.mtu;
5444 be64_to_cpu(crq->request_capability_rsp.number);
5447 send_request_cap(adapter, 1);
5448 return;
5449 default:
5450 dev_err(dev, "Error %d in request cap rsp\n",
5451 crq->request_capability_rsp.rc.code);
5455 /* Done receiving requested capabilities, query IP offload support */
5456 if (atomic_read(&adapter->running_cap_crqs) == 0)
5457 send_query_ip_offload(adapter);
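/* Renegotiation example: if the driver asked for req_tx_queues = 8 but
 * the server can grant only 4, the response carries PARTIALSUCCESS with
 * number = 4. The code above (non-MTU case) stores 4 through req_value
 * and calls send_request_cap(adapter, 1), re-issuing the whole
 * REQUEST_CAPABILITY series with the lowered value; only once every
 * outstanding response has arrived (running_cap_crqs == 0) does the
 * exchange advance to QUERY_IP_OFFLOAD.
 */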
5460 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
5461 struct ibmvnic_adapter *adapter)
5463 struct device *dev = &adapter->vdev->dev;
5464 struct net_device *netdev = adapter->netdev;
5465 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5466 struct ibmvnic_login_buffer *login = adapter->login_buf;
5467 u64 *tx_handle_array;
5468 u64 *rx_handle_array;
5469 int num_tx_pools;
5470 int num_rx_pools;
5471 u64 *size_array;
5472 u64 rsp_len;
5473 int i;
5475 /* CHECK: Test/set of login_pending does not need to be atomic
5476 * because only ibmvnic_tasklet tests/clears this.
5478 if (!adapter->login_pending) {
5479 netdev_warn(netdev, "Ignoring unexpected login response\n");
5482 adapter->login_pending = false;
5484 /* If the number of queues requested can't be allocated by the
5485 * server, the login response will return with code 1. We will need
5486 * to resend the login buffer with fewer queues requested.
5488 if (login_rsp_crq->generic.rc.code) {
5489 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5490 complete(&adapter->init_done);
5491 return 0;
5492 }
5494 if (adapter->failover_pending) {
5495 adapter->init_done_rc = -EAGAIN;
5496 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
5497 complete(&adapter->init_done);
5498 /* login response buffer will be released on reset */
5499 return 0;
5500 }
5502 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5504 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5505 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5506 netdev_dbg(adapter->netdev, "%016lx\n",
5507 ((unsigned long *)(adapter->login_rsp_buf))[i]);
5511 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5512 (be32_to_cpu(login->num_rxcomp_subcrqs) *
5513 adapter->req_rx_add_queues !=
5514 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5515 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5516 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5517 return -EIO;
5518 }
5520 rsp_len = be32_to_cpu(login_rsp->len);
5521 if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
5522 rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
5523 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
5524 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
5525 rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
5526 /* This can happen if a login request times out and there are
5527 * 2 outstanding login requests sent, the LOGIN_RSP crq
5528 * could have been for the older login request. So we are
5529 * parsing the newer response buffer which may be incomplete
5531 dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
5532 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5533 return -EIO;
5534 }
5536 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5537 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5538 /* variable buffer sizes are not supported, so just read the
5539 * first entry.
5540 */
5541 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5543 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5544 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5546 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5547 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5548 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5549 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5551 for (i = 0; i < num_tx_pools; i++)
5552 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5554 for (i = 0; i < num_rx_pools; i++)
5555 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5557 adapter->num_active_tx_scrqs = num_tx_pools;
5558 adapter->num_active_rx_scrqs = num_rx_pools;
5559 release_login_rsp_buffer(adapter);
5560 release_login_buffer(adapter);
5561 complete(&adapter->init_done);
5563 return 0;
5564 }
5566 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5567 struct ibmvnic_adapter *adapter)
5569 struct device *dev = &adapter->vdev->dev;
5572 rc = crq->request_unmap_rsp.rc.code;
5574 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5577 static void handle_query_map_rsp(union ibmvnic_crq *crq,
5578 struct ibmvnic_adapter *adapter)
5580 struct net_device *netdev = adapter->netdev;
5581 struct device *dev = &adapter->vdev->dev;
5584 rc = crq->query_map_rsp.rc.code;
5586 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5589 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5590 crq->query_map_rsp.page_size,
5591 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5592 __be32_to_cpu(crq->query_map_rsp.free_pages));
5595 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5596 struct ibmvnic_adapter *adapter)
5598 struct net_device *netdev = adapter->netdev;
5599 struct device *dev = &adapter->vdev->dev;
5602 atomic_dec(&adapter->running_cap_crqs);
5603 netdev_dbg(netdev, "Outstanding queries: %d\n",
5604 atomic_read(&adapter->running_cap_crqs));
5605 rc = crq->query_capability.rc.code;
5607 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5611 switch (be16_to_cpu(crq->query_capability.capability)) {
5613 adapter->min_tx_queues =
5614 be64_to_cpu(crq->query_capability.number);
5615 netdev_dbg(netdev, "min_tx_queues = %lld\n",
5616 adapter->min_tx_queues);
5619 adapter->min_rx_queues =
5620 be64_to_cpu(crq->query_capability.number);
5621 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5622 adapter->min_rx_queues);
5624 case MIN_RX_ADD_QUEUES:
5625 adapter->min_rx_add_queues =
5626 be64_to_cpu(crq->query_capability.number);
5627 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5628 adapter->min_rx_add_queues);
5631 adapter->max_tx_queues =
5632 be64_to_cpu(crq->query_capability.number);
5633 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5634 adapter->max_tx_queues);
5637 adapter->max_rx_queues =
5638 be64_to_cpu(crq->query_capability.number);
5639 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5640 adapter->max_rx_queues);
5642 case MAX_RX_ADD_QUEUES:
5643 adapter->max_rx_add_queues =
5644 be64_to_cpu(crq->query_capability.number);
5645 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5646 adapter->max_rx_add_queues);
5648 case MIN_TX_ENTRIES_PER_SUBCRQ:
5649 adapter->min_tx_entries_per_subcrq =
5650 be64_to_cpu(crq->query_capability.number);
5651 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5652 adapter->min_tx_entries_per_subcrq);
5654 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5655 adapter->min_rx_add_entries_per_subcrq =
5656 be64_to_cpu(crq->query_capability.number);
5657 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5658 adapter->min_rx_add_entries_per_subcrq);
5660 case MAX_TX_ENTRIES_PER_SUBCRQ:
5661 adapter->max_tx_entries_per_subcrq =
5662 be64_to_cpu(crq->query_capability.number);
5663 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5664 adapter->max_tx_entries_per_subcrq);
5666 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5667 adapter->max_rx_add_entries_per_subcrq =
5668 be64_to_cpu(crq->query_capability.number);
5669 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
5670 adapter->max_rx_add_entries_per_subcrq);
5672 case TCP_IP_OFFLOAD:
5673 adapter->tcp_ip_offload =
5674 be64_to_cpu(crq->query_capability.number);
5675 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5676 adapter->tcp_ip_offload);
5678 case PROMISC_SUPPORTED:
5679 adapter->promisc_supported =
5680 be64_to_cpu(crq->query_capability.number);
5681 netdev_dbg(netdev, "promisc_supported = %lld\n",
5682 adapter->promisc_supported);
5685 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5686 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5687 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5690 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5691 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5692 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5694 case MAX_MULTICAST_FILTERS:
5695 adapter->max_multicast_filters =
5696 be64_to_cpu(crq->query_capability.number);
5697 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5698 adapter->max_multicast_filters);
5700 case VLAN_HEADER_INSERTION:
5701 adapter->vlan_header_insertion =
5702 be64_to_cpu(crq->query_capability.number);
5703 if (adapter->vlan_header_insertion)
5704 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5705 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5706 adapter->vlan_header_insertion);
5708 case RX_VLAN_HEADER_INSERTION:
5709 adapter->rx_vlan_header_insertion =
5710 be64_to_cpu(crq->query_capability.number);
5711 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5712 adapter->rx_vlan_header_insertion);
5714 case MAX_TX_SG_ENTRIES:
5715 adapter->max_tx_sg_entries =
5716 be64_to_cpu(crq->query_capability.number);
5717 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5718 adapter->max_tx_sg_entries);
5720 case RX_SG_SUPPORTED:
5721 adapter->rx_sg_supported =
5722 be64_to_cpu(crq->query_capability.number);
5723 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5724 adapter->rx_sg_supported);
5726 case OPT_TX_COMP_SUB_QUEUES:
5727 adapter->opt_tx_comp_sub_queues =
5728 be64_to_cpu(crq->query_capability.number);
5729 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5730 adapter->opt_tx_comp_sub_queues);
5732 case OPT_RX_COMP_QUEUES:
5733 adapter->opt_rx_comp_queues =
5734 be64_to_cpu(crq->query_capability.number);
5735 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5736 adapter->opt_rx_comp_queues);
5738 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5739 adapter->opt_rx_bufadd_q_per_rx_comp_q =
5740 be64_to_cpu(crq->query_capability.number);
5741 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5742 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5744 case OPT_TX_ENTRIES_PER_SUBCRQ:
5745 adapter->opt_tx_entries_per_subcrq =
5746 be64_to_cpu(crq->query_capability.number);
5747 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5748 adapter->opt_tx_entries_per_subcrq);
5750 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5751 adapter->opt_rxba_entries_per_subcrq =
5752 be64_to_cpu(crq->query_capability.number);
5753 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5754 adapter->opt_rxba_entries_per_subcrq);
5756 case TX_RX_DESC_REQ:
5757 adapter->tx_rx_desc_req = crq->query_capability.number;
5758 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5759 adapter->tx_rx_desc_req);
5763 netdev_err(netdev, "Got invalid cap rsp %d\n",
5764 crq->query_capability.capability);
5767 out:
5768 if (atomic_read(&adapter->running_cap_crqs) == 0)
5769 send_request_cap(adapter, 0);
5772 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5774 union ibmvnic_crq crq;
5775 int rc;
5777 memset(&crq, 0, sizeof(crq));
5778 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5779 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
5781 mutex_lock(&adapter->fw_lock);
5782 adapter->fw_done_rc = 0;
5783 reinit_completion(&adapter->fw_done);
5785 rc = ibmvnic_send_crq(adapter, &crq);
5786 if (rc) {
5787 mutex_unlock(&adapter->fw_lock);
5788 return rc;
5789 }
5791 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5792 if (rc) {
5793 mutex_unlock(&adapter->fw_lock);
5794 return rc;
5795 }
5797 mutex_unlock(&adapter->fw_lock);
5798 return adapter->fw_done_rc ? -EIO : 0;
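/* Caller's view (sketch): the ethtool get_link_ksettings path uses this
 * as a synchronous getter, roughly
 *
 *	rc = send_query_phys_parms(adapter);
 *	cmd->base.speed = adapter->speed;
 *	cmd->base.duplex = adapter->duplex;
 *
 * i.e. fw_lock serializes concurrent firmware commands, the completion
 * converts the asynchronous QUERY_PHYS_PARMS_RSP into a blocking call,
 * and the handler below caches speed/duplex before fw_done is completed.
 */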
5801 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5802 struct ibmvnic_adapter *adapter)
5804 struct net_device *netdev = adapter->netdev;
5805 int rc;
5806 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5808 rc = crq->query_phys_parms_rsp.rc.code;
5810 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5814 case IBMVNIC_10MBPS:
5815 adapter->speed = SPEED_10;
5817 case IBMVNIC_100MBPS:
5818 adapter->speed = SPEED_100;
5821 adapter->speed = SPEED_1000;
5823 case IBMVNIC_10GBPS:
5824 adapter->speed = SPEED_10000;
5826 case IBMVNIC_25GBPS:
5827 adapter->speed = SPEED_25000;
5829 case IBMVNIC_40GBPS:
5830 adapter->speed = SPEED_40000;
5832 case IBMVNIC_50GBPS:
5833 adapter->speed = SPEED_50000;
5835 case IBMVNIC_100GBPS:
5836 adapter->speed = SPEED_100000;
5838 case IBMVNIC_200GBPS:
5839 adapter->speed = SPEED_200000;
5840 break;
5841 default:
5842 if (netif_carrier_ok(netdev))
5843 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5844 adapter->speed = SPEED_UNKNOWN;
5846 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5847 adapter->duplex = DUPLEX_FULL;
5848 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5849 adapter->duplex = DUPLEX_HALF;
5851 adapter->duplex = DUPLEX_UNKNOWN;
5853 return 0;
5854 }
5856 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5857 struct ibmvnic_adapter *adapter)
5859 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5860 struct net_device *netdev = adapter->netdev;
5861 struct device *dev = &adapter->vdev->dev;
5862 u64 *u64_crq = (u64 *)crq;
5865 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5866 (unsigned long)cpu_to_be64(u64_crq[0]),
5867 (unsigned long)cpu_to_be64(u64_crq[1]));
5868 switch (gen_crq->first) {
5869 case IBMVNIC_CRQ_INIT_RSP:
5870 switch (gen_crq->cmd) {
5871 case IBMVNIC_CRQ_INIT:
5872 dev_info(dev, "Partner initialized\n");
5873 adapter->from_passive_init = true;
5874 /* Discard any stale login responses from prev reset.
5875 * CHECK: should we clear even on INIT_COMPLETE?
5876 */
5877 adapter->login_pending = false;
5879 if (adapter->state == VNIC_DOWN)
5880 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5882 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5884 if (rc && rc != -EBUSY) {
5885 /* We were unable to schedule the failover
5886 * reset either because the adapter was still
5887 * probing (eg: during kexec) or we could not
5888 * allocate memory. Clear the failover_pending
5889 * flag since no one else will. We ignore
5890 * EBUSY because it means either FAILOVER reset
5891 * is already scheduled or the adapter is
5892 * brought down.
5893 */
5894 netdev_err(netdev,
5895 "Error %ld scheduling failover reset\n",
5897 adapter->failover_pending = false;
5900 if (!completion_done(&adapter->init_done)) {
5901 if (!adapter->init_done_rc)
5902 adapter->init_done_rc = -EAGAIN;
5903 complete(&adapter->init_done);
5907 case IBMVNIC_CRQ_INIT_COMPLETE:
5908 dev_info(dev, "Partner initialization complete\n");
5909 adapter->crq.active = true;
5910 send_version_xchg(adapter);
5913 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5916 case IBMVNIC_CRQ_XPORT_EVENT:
5917 netif_carrier_off(netdev);
5918 adapter->crq.active = false;
5919 /* terminate any thread waiting for a response
5920 * from the device
5921 */
5922 if (!completion_done(&adapter->fw_done)) {
5923 adapter->fw_done_rc = -EIO;
5924 complete(&adapter->fw_done);
5927 /* if we got here during crq-init, retry crq-init */
5928 if (!completion_done(&adapter->init_done)) {
5929 adapter->init_done_rc = -EAGAIN;
5930 complete(&adapter->init_done);
5933 if (!completion_done(&adapter->stats_done))
5934 complete(&adapter->stats_done);
5935 if (test_bit(0, &adapter->resetting))
5936 adapter->force_reset_recovery = true;
5937 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5938 dev_info(dev, "Migrated, re-enabling adapter\n");
5939 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5940 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5941 dev_info(dev, "Backing device failover detected\n");
5942 adapter->failover_pending = true;
5943 } else {
5944 /* The adapter lost the connection */
5945 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5947 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5950 case IBMVNIC_CRQ_CMD_RSP:
5951 break;
5952 default:
5953 dev_err(dev, "Got an invalid msg type 0x%02x\n",
5958 switch (gen_crq->cmd) {
5959 case VERSION_EXCHANGE_RSP:
5960 rc = crq->version_exchange_rsp.rc.code;
5962 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
5966 be16_to_cpu(crq->version_exchange_rsp.version);
5967 dev_info(dev, "Partner protocol version is %d\n",
5968 ibmvnic_version);
5969 send_query_cap(adapter);
5970 break;
5971 case QUERY_CAPABILITY_RSP:
5972 handle_query_cap_rsp(crq, adapter);
5975 handle_query_map_rsp(crq, adapter);
5977 case REQUEST_MAP_RSP:
5978 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5979 complete(&adapter->fw_done);
5981 case REQUEST_UNMAP_RSP:
5982 handle_request_unmap_rsp(crq, adapter);
5984 case REQUEST_CAPABILITY_RSP:
5985 handle_request_cap_rsp(crq, adapter);
5988 netdev_dbg(netdev, "Got Login Response\n");
5989 handle_login_rsp(crq, adapter);
5991 case LOGICAL_LINK_STATE_RSP:
5993 "Got Logical Link State Response, state: %d rc: %d\n",
5994 crq->logical_link_state_rsp.link_state,
5995 crq->logical_link_state_rsp.rc.code);
5996 adapter->logical_link_state =
5997 crq->logical_link_state_rsp.link_state;
5998 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5999 complete(&adapter->init_done);
6001 case LINK_STATE_INDICATION:
6002 netdev_dbg(netdev, "Got Logical Link State Indication\n");
6003 adapter->phys_link_state =
6004 crq->link_state_indication.phys_link_state;
6005 adapter->logical_link_state =
6006 crq->link_state_indication.logical_link_state;
6007 if (adapter->phys_link_state && adapter->logical_link_state)
6008 netif_carrier_on(netdev);
6009 else
6010 netif_carrier_off(netdev);
6011 break;
6012 case CHANGE_MAC_ADDR_RSP:
6013 netdev_dbg(netdev, "Got MAC address change Response\n");
6014 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
6016 case ERROR_INDICATION:
6017 netdev_dbg(netdev, "Got Error Indication\n");
6018 handle_error_indication(crq, adapter);
6020 case REQUEST_STATISTICS_RSP:
6021 netdev_dbg(netdev, "Got Statistics Response\n");
6022 complete(&adapter->stats_done);
6024 case QUERY_IP_OFFLOAD_RSP:
6025 netdev_dbg(netdev, "Got Query IP offload Response\n");
6026 handle_query_ip_offload_rsp(adapter);
6028 case MULTICAST_CTRL_RSP:
6029 netdev_dbg(netdev, "Got multicast control Response\n");
6031 case CONTROL_IP_OFFLOAD_RSP:
6032 netdev_dbg(netdev, "Got Control IP offload Response\n");
6033 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
6034 sizeof(adapter->ip_offload_ctrl),
6035 DMA_TO_DEVICE);
6036 complete(&adapter->init_done);
6038 case COLLECT_FW_TRACE_RSP:
6039 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
6040 complete(&adapter->fw_done);
6042 case GET_VPD_SIZE_RSP:
6043 handle_vpd_size_rsp(crq, adapter);
6046 handle_vpd_rsp(crq, adapter);
6048 case QUERY_PHYS_PARMS_RSP:
6049 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
6050 complete(&adapter->fw_done);
6053 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
6058 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
6060 struct ibmvnic_adapter *adapter = instance;
6062 tasklet_schedule(&adapter->tasklet);
6064 return IRQ_HANDLED;
6065 }
6066 static void ibmvnic_tasklet(struct tasklet_struct *t)
6068 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
6069 struct ibmvnic_crq_queue *queue = &adapter->crq;
6070 union ibmvnic_crq *crq;
6071 unsigned long flags;
6073 spin_lock_irqsave(&queue->lock, flags);
6075 /* Pull all the valid messages off the CRQ */
6076 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
6077 /* This barrier makes sure ibmvnic_next_crq()'s
6078 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
6079 * before ibmvnic_handle_crq()'s
6080 * switch(gen_crq->first) and switch(gen_crq->cmd).
6081 */
6082 dma_rmb();
6083 ibmvnic_handle_crq(crq, adapter);
6084 crq->generic.first = 0;
6087 spin_unlock_irqrestore(&queue->lock, flags);
6090 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
6092 struct vio_dev *vdev = adapter->vdev;
6095 do {
6096 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
6097 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
6100 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
6105 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
6107 struct ibmvnic_crq_queue *crq = &adapter->crq;
6108 struct device *dev = &adapter->vdev->dev;
6109 struct vio_dev *vdev = adapter->vdev;
6113 do {
6114 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6115 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6117 /* Clean out the queue */
6118 if (!crq->msgs)
6119 return -EINVAL;
6121 memset(crq->msgs, 0, PAGE_SIZE);
6122 crq->cur = 0;
6123 crq->active = false;
6125 /* And re-open it again */
6126 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6127 crq->msg_token, PAGE_SIZE);
6129 if (rc == H_CLOSED)
6130 /* Adapter is good, but other end is not ready */
6131 dev_warn(dev, "Partner adapter not ready\n");
6132 else if (rc != 0)
6133 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
6135 return rc;
6136 }
6138 static void release_crq_queue(struct ibmvnic_adapter *adapter)
6140 struct ibmvnic_crq_queue *crq = &adapter->crq;
6141 struct vio_dev *vdev = adapter->vdev;
6147 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
6148 free_irq(vdev->irq, adapter);
6149 tasklet_kill(&adapter->tasklet);
6151 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6152 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6154 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
6155 DMA_BIDIRECTIONAL);
6156 free_page((unsigned long)crq->msgs);
6158 crq->active = false;
6161 static int init_crq_queue(struct ibmvnic_adapter *adapter)
6163 struct ibmvnic_crq_queue *crq = &adapter->crq;
6164 struct device *dev = &adapter->vdev->dev;
6165 struct vio_dev *vdev = adapter->vdev;
6166 int rc, retrc = -ENOMEM;
6171 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
6172 /* Should we allocate more than one page? */
6173 if (!crq->msgs)
6174 return -ENOMEM;
6177 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
6178 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
6179 DMA_BIDIRECTIONAL);
6180 if (dma_mapping_error(dev, crq->msg_token))
6181 goto map_failed;
6183 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6184 crq->msg_token, PAGE_SIZE);
6186 if (rc == H_RESOURCE)
6187 /* maybe kexecing and resource is busy. try a reset */
6188 rc = ibmvnic_reset_crq(adapter);
6189 retrc = rc;
6191 if (rc == H_CLOSED) {
6192 dev_warn(dev, "Partner adapter not ready\n");
6194 dev_warn(dev, "Error %d opening adapter\n", rc);
6195 goto reg_crq_failed;
6200 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
6202 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
6203 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
6204 adapter->vdev->unit_address);
6205 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
6207 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
6209 goto req_irq_failed;
6212 rc = vio_enable_interrupts(vdev);
6214 dev_err(dev, "Error %d enabling interrupts\n", rc);
6215 goto req_irq_failed;
6219 spin_lock_init(&crq->lock);
6221 /* process any CRQs that were queued before we enabled interrupts */
6222 tasklet_schedule(&adapter->tasklet);
6224 return retrc;
6226 req_irq_failed:
6227 tasklet_kill(&adapter->tasklet);
6228 do {
6229 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6230 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6231 reg_crq_failed:
6232 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
6233 map_failed:
6234 free_page((unsigned long)crq->msgs);
6235 crq->msgs = NULL;
6236 return retrc;
6237 }
6239 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
6241 struct device *dev = &adapter->vdev->dev;
6242 unsigned long timeout = msecs_to_jiffies(20000);
6243 u64 old_num_rx_queues = adapter->req_rx_queues;
6244 u64 old_num_tx_queues = adapter->req_tx_queues;
6247 adapter->from_passive_init = false;
6249 rc = ibmvnic_send_crq_init(adapter);
6251 dev_err(dev, "Send crq init failed with error %d\n", rc);
6255 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
6256 dev_err(dev, "Initialization sequence timed out\n");
6260 if (adapter->init_done_rc) {
6261 release_crq_queue(adapter);
6262 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
6263 return adapter->init_done_rc;
6266 if (adapter->from_passive_init) {
6267 adapter->state = VNIC_OPEN;
6268 adapter->from_passive_init = false;
6269 dev_err(dev, "CRQ-init failed, passive-init\n");
6274 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
6275 adapter->reset_reason != VNIC_RESET_MOBILITY) {
6276 if (adapter->req_rx_queues != old_num_rx_queues ||
6277 adapter->req_tx_queues != old_num_tx_queues) {
6278 release_sub_crqs(adapter, 0);
6279 rc = init_sub_crqs(adapter);
6281 /* no need to reinitialize completely, but we do
6282 * need to clean up transmits that were in flight
6283 * when we processed the reset. Failure to do so
6284 * will confound the upper layer, usually TCP, by
6285 * creating the illusion of transmits that are
6286 * awaiting completion.
6288 clean_tx_pools(adapter);
6290 rc = reset_sub_crq_queues(adapter);
6293 rc = init_sub_crqs(adapter);
6297 dev_err(dev, "Initialization of sub crqs failed\n");
6298 release_crq_queue(adapter);
6302 rc = init_sub_crq_irqs(adapter);
6304 dev_err(dev, "Failed to initialize sub crq irqs\n");
6305 release_crq_queue(adapter);
6311 static struct device_attribute dev_attr_failover;
6313 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
6315 struct ibmvnic_adapter *adapter;
6316 struct net_device *netdev;
6317 unsigned char *mac_addr_p;
6318 unsigned long flags;
6322 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
6325 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
6326 VETH_MAC_ADDR, NULL);
6329 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
6330 __FILE__, __LINE__);
6334 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
6335 IBMVNIC_MAX_QUEUES);
6336 if (!netdev)
6337 return -ENOMEM;
6339 adapter = netdev_priv(netdev);
6340 adapter->state = VNIC_PROBING;
6341 dev_set_drvdata(&dev->dev, netdev);
6342 adapter->vdev = dev;
6343 adapter->netdev = netdev;
6344 adapter->login_pending = false;
6345 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
6346 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
6347 bitmap_set(adapter->map_ids, 0, 1);
6349 ether_addr_copy(adapter->mac_addr, mac_addr_p);
6350 eth_hw_addr_set(netdev, adapter->mac_addr);
6351 netdev->irq = dev->irq;
6352 netdev->netdev_ops = &ibmvnic_netdev_ops;
6353 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
6354 SET_NETDEV_DEV(netdev, &dev->dev);
6356 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
6357 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
6358 __ibmvnic_delayed_reset);
6359 INIT_LIST_HEAD(&adapter->rwi_list);
6360 spin_lock_init(&adapter->rwi_lock);
6361 spin_lock_init(&adapter->state_lock);
6362 mutex_init(&adapter->fw_lock);
6363 init_completion(&adapter->probe_done);
6364 init_completion(&adapter->init_done);
6365 init_completion(&adapter->fw_done);
6366 init_completion(&adapter->reset_done);
6367 init_completion(&adapter->stats_done);
6368 clear_bit(0, &adapter->resetting);
6369 adapter->prev_rx_buf_sz = 0;
6370 adapter->prev_mtu = 0;
6372 init_success = false;
6373 do {
6374 reinit_init_done(adapter);
6376 /* clear any failovers we got in the previous pass
6377 * since we are reinitializing the CRQ
6379 adapter->failover_pending = false;
6381 /* If we had already initialized CRQ, we may have one or
6382 * more resets queued already. Discard those and release
6383 * the CRQ before initializing the CRQ again.
6385 release_crq_queue(adapter);
6387 /* Since we are still in PROBING state, __ibmvnic_reset()
6388 * will not access the ->rwi_list and since we released CRQ,
6389 * we won't get _new_ transport events. But there maybe an
6390 * ongoing ibmvnic_reset() call. So serialize access to
6391 * rwi_list. If we win the race, ibvmnic_reset() could add
6392 * a reset after we purged but thats ok - we just may end
6393 * up with an extra reset (i.e similar to having two or more
6394 * resets in the queue at once).
6397 spin_lock_irqsave(&adapter->rwi_lock, flags);
6398 flush_reset_queue(adapter);
6399 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
6401 rc = init_crq_queue(adapter);
6403 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
6405 goto ibmvnic_init_fail;
6408 rc = ibmvnic_reset_init(adapter, false);
6409 } while (rc == -EAGAIN);
6411 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
6412 * partner is not ready. CRQ is not active. When the partner becomes
6413 * ready, we will do the passive init reset.
6417 init_success = true;
6419 rc = init_stats_buffers(adapter);
6420 if (rc)
6421 goto ibmvnic_init_fail;
6423 rc = init_stats_token(adapter);
6424 if (rc)
6425 goto ibmvnic_stats_fail;
6427 rc = device_create_file(&dev->dev, &dev_attr_failover);
6428 if (rc)
6429 goto ibmvnic_dev_file_err;
6431 netif_carrier_off(netdev);
6433 if (init_success) {
6434 adapter->state = VNIC_PROBED;
6435 netdev->mtu = adapter->req_mtu - ETH_HLEN;
6436 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
6437 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
6438 } else {
6439 adapter->state = VNIC_DOWN;
6440 }
6442 adapter->wait_for_reset = false;
6443 adapter->last_reset_time = jiffies;
6445 rc = register_netdev(netdev);
6447 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
6448 goto ibmvnic_register_fail;
6450 dev_info(&dev->dev, "ibmvnic registered\n");
6452 rc = ibmvnic_cpu_notif_add(adapter);
6454 netdev_err(netdev, "Registering cpu notifier failed\n");
6455 goto cpu_notif_add_failed;
6458 complete(&adapter->probe_done);
6460 return 0;
cpu_notif_add_failed:
	unregister_netdev(netdev);

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* Clean up the worker thread after releasing the CRQ so we don't get
	 * transport events (i.e., new work items for the worker thread).
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
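
/* Tear down the adapter in roughly the reverse order of ibmvnic_probe().
 * The state is moved to REMOVING under both locks first, so the reset
 * worker can neither queue nor act on new work while we flush it.
 */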
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset(), which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	ibmvnic_cpu_notif_remove(adapter);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}
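
/* Writing "1" to the "failover" sysfs attribute initiates a client
 * failover via H_VIOCTL, falling back to a full CRQ-driven failover
 * reset if the hypercalls fail.  For example (the path is illustrative
 * and depends on the VIO bus layout):
 *
 *	echo 1 > /sys/devices/vio/<unit-address>/failover
 */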
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n", rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
	return count;
}
static DEVICE_ATTR_WO(failover);
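
/* Advise the VIO bus how much IOMMU-mapped (DMA) memory this device may
 * need: one page for the CRQ, the statistics buffer, four pages per
 * sub-CRQ, plus the long-term-mapped buffers of every active RX pool.
 */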
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
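
/* PM resume: if the device was open, kick the CRQ tasklet so any
 * messages that arrived while suspended get processed.
 */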
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);
	return 0;
}
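
/* Match the "IBM,vnic" virtual devices presented to the LPAR by the
 * hypervisor; the empty entry terminates the table.
 */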
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};
static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};
/* module functions */
static int __init ibmvnic_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
				      ibmvnic_cpu_online, ibmvnic_cpu_down_prep);
	if (ret < 0)
		goto out;
	ibmvnic_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
				      NULL, ibmvnic_cpu_dead);
	if (ret)
		goto err_dead;
	ret = vio_register_driver(&ibmvnic_driver);
	if (ret)
		goto err_vio_register;
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);
	return 0;

err_vio_register:
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
err_dead:
	cpuhp_remove_multi_state(ibmvnic_online);
out:
	return ret;
}
static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
	cpuhp_remove_multi_state(ibmvnic_online);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);